From 58737770465e51f66c4e5c3d6794322c23ea0427 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Nov 2025 13:20:39 +0000 Subject: [PATCH] Bump github.com/thanos-io/thanos Bumps [github.com/thanos-io/thanos](https://github.com/thanos-io/thanos) from 0.39.3-0.20250729120336-88d0ae8071cb to 0.40.1. - [Release notes](https://github.com/thanos-io/thanos/releases) - [Changelog](https://github.com/thanos-io/thanos/blob/v0.40.1/CHANGELOG.md) - [Commits](https://github.com/thanos-io/thanos/commits/v0.40.1) --- updated-dependencies: - dependency-name: github.com/thanos-io/thanos dependency-version: 0.40.1 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 7 +- go.sum | 10 +- .../VictoriaMetrics/easyproto/LICENSE | 190 ++ .../VictoriaMetrics/easyproto/README.md | 225 ++ .../VictoriaMetrics/easyproto/doc.go | 3 + .../VictoriaMetrics/easyproto/reader.go | 739 +++++++ .../VictoriaMetrics/easyproto/writer.go | 718 ++++++ .../thanos-io/objstore/CHANGELOG.md | 1 + vendor/github.com/thanos-io/objstore/inmem.go | 18 + .../github.com/thanos-io/objstore/objstore.go | 2 +- .../thanos-io/objstore/providers/gcs/gcs.go | 2 +- .../thanos-io/thanos/pkg/block/block.go | 28 +- .../thanos-io/thanos/pkg/block/fetcher.go | 114 +- .../pkg/block/indexheader/reader_pool.go | 2 +- .../thanos/pkg/block/metadata/meta.go | 5 + .../thanos/pkg/cacheutil/async_op.go | 2 +- .../thanos/pkg/cacheutil/cacheutil.go | 5 +- .../thanos/pkg/cacheutil/redis_client.go | 5 +- .../thanos/pkg/clientconfig/config.go | 2 +- .../thanos-io/thanos/pkg/clientconfig/http.go | 5 +- .../thanos/pkg/compact/blocks_cleaner.go | 10 +- .../thanos-io/thanos/pkg/compact/clean.go | 58 +- .../thanos-io/thanos/pkg/compact/compact.go | 73 +- .../pkg/compact/downsample/downsample.go | 31 +- .../thanos/pkg/compact/downsample/pool.go | 2 +- .../thanos-io/thanos/pkg/compact/planner.go | 9 +- .../thanos-io/thanos/pkg/dedup/chunk_iter.go | 4 +- .../thanos/pkg/discovery/dns/grpc.go | 50 +- .../thanos/pkg/discovery/dns/provider.go | 5 +- .../thanos/pkg/discovery/memcache/provider.go | 5 +- .../thanos/pkg/discovery/memcache/resolver.go | 4 +- .../thanos/pkg/extgrpc/snappy/snappy.go | 4 +- .../thanos-io/thanos/pkg/extpromql/parser.go | 9 +- .../thanos/pkg/info/infopb/rpc.pb.go | 254 ++- .../thanos/pkg/info/infopb/rpc.proto | 7 + .../thanos-io/thanos/pkg/model/units.go | 4 +- .../thanos/pkg/promclient/promclient.go | 25 +- .../thanos-io/thanos/pkg/query/endpointset.go | 54 +- .../thanos-io/thanos/pkg/query/iter.go | 18 +- .../thanos-io/thanos/pkg/query/querier.go | 4 - .../thanos/pkg/query/remote_engine.go | 2 +- .../thanos/pkg/querysharding/analyzer.go | 11 +- .../thanos-io/thanos/pkg/runutil/runutil.go | 8 +- .../thanos-io/thanos/pkg/shipper/shipper.go | 2 +- .../thanos/pkg/status/statuspb/custom.go | 96 + .../thanos/pkg/status/statuspb/rpc.pb.go | 1949 +++++++++++++++++ .../thanos/pkg/status/statuspb/rpc.proto | 81 + .../thanos-io/thanos/pkg/store/bucket.go | 28 +- .../thanos-io/thanos/pkg/store/cache/cache.go | 2 +- .../thanos/pkg/store/cache/caching_bucket.go | 6 +- .../pkg/store/cache/caching_bucket_factory.go | 2 +- .../thanos/pkg/store/cache/factory.go | 2 +- .../thanos/pkg/store/cache/matchers_cache.go | 2 +- .../thanos/pkg/store/labelpb/label.go | 88 +- .../thanos-io/thanos/pkg/store/postings.go | 6 +- .../thanos-io/thanos/pkg/store/prometheus.go | 14 +- .../thanos-io/thanos/pkg/store/proxy.go | 5 +- 
.../thanos-io/thanos/pkg/store/proxy_merge.go | 7 +- .../pkg/store/storepb/prompb/samples.go | 4 +- .../thanos-io/thanos/pkg/store/tsdb.go | 9 +- .../thanos-io/thanos/pkg/strutil/merge.go | 5 +- .../thanos/pkg/testutil/e2eutil/prometheus.go | 19 +- .../thanos-io/thanos/pkg/tracing/grpc.go | 4 +- .../thanos/pkg/tracing/interceptors/client.go | 6 +- .../pkg/tracing/interceptors/reporter.go | 14 +- .../thanos/pkg/tracing/interceptors/server.go | 8 +- .../thanos/pkg/tracing/migration/bridge.go | 4 +- .../pkg/tracing/tracing_middleware/client.go | 6 +- .../pkg/tracing/tracing_middleware/server.go | 6 +- vendor/modules.txt | 10 +- 70 files changed, 4749 insertions(+), 370 deletions(-) create mode 100644 vendor/github.com/VictoriaMetrics/easyproto/LICENSE create mode 100644 vendor/github.com/VictoriaMetrics/easyproto/README.md create mode 100644 vendor/github.com/VictoriaMetrics/easyproto/doc.go create mode 100644 vendor/github.com/VictoriaMetrics/easyproto/reader.go create mode 100644 vendor/github.com/VictoriaMetrics/easyproto/writer.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/status/statuspb/custom.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/status/statuspb/rpc.pb.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/status/statuspb/rpc.proto diff --git a/go.mod b/go.mod index ebc992f170b..3a32fe94e0a 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/cortexproject/cortex -go 1.24.0 +go 1.25.0 require ( github.com/Masterminds/squirrel v1.5.4 @@ -49,9 +49,9 @@ require ( github.com/sony/gobreaker v1.0.0 github.com/spf13/afero v1.11.0 github.com/stretchr/testify v1.11.1 - github.com/thanos-io/objstore v0.0.0-20250722142242-922b22272ee3 + github.com/thanos-io/objstore v0.0.0-20250804093838-71d60dfee488 github.com/thanos-io/promql-engine v0.0.0-20250924193140-e9123dc11264 - github.com/thanos-io/thanos v0.39.3-0.20250729120336-88d0ae8071cb + github.com/thanos-io/thanos v0.40.1 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5 go.etcd.io/etcd/api/v3 v3.5.17 @@ -114,6 +114,7 @@ require ( github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.52.0 // indirect + github.com/VictoriaMetrics/easyproto v0.1.4 // indirect github.com/alecthomas/kingpin/v2 v2.4.0 // indirect github.com/andybalholm/brotli v1.1.1 // indirect github.com/armon/go-metrics v0.4.1 // indirect diff --git a/go.sum b/go.sum index 9fc1706b026..a5d91bfa6d3 100644 --- a/go.sum +++ b/go.sum @@ -824,6 +824,8 @@ github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8 github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc= +github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710= github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod 
h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= @@ -1772,12 +1774,12 @@ github.com/tencentyun/cos-go-sdk-v5 v0.7.66 h1:O4O6EsozBoDjxWbltr3iULgkI7WPj/BFN github.com/tencentyun/cos-go-sdk-v5 v0.7.66/go.mod h1:8+hG+mQMuRP/OIS9d83syAvXvrMj9HhkND6Q1fLghw0= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= -github.com/thanos-io/objstore v0.0.0-20250722142242-922b22272ee3 h1:P301Anc27aVL7Ls88el92j+qW3PJp8zmiDl+kOUZv3A= -github.com/thanos-io/objstore v0.0.0-20250722142242-922b22272ee3/go.mod h1:uDHLkMKOGDAnlN75EAz8VrRzob1+VbgYSuUleatWuF0= +github.com/thanos-io/objstore v0.0.0-20250804093838-71d60dfee488 h1:khBsQLLRoF1KzXgTlwFZa6mC32bwYUUAu/AeP49V7UM= +github.com/thanos-io/objstore v0.0.0-20250804093838-71d60dfee488/go.mod h1:uDHLkMKOGDAnlN75EAz8VrRzob1+VbgYSuUleatWuF0= github.com/thanos-io/promql-engine v0.0.0-20250924193140-e9123dc11264 h1:sOmANo4XVhem4VgvI9w05DBwqMex/qw+cDjuHW2FKWw= github.com/thanos-io/promql-engine v0.0.0-20250924193140-e9123dc11264/go.mod h1:MOFN0M1nDMcWZg1t4iF39sOard/K4SWgO/HHSODeDIc= -github.com/thanos-io/thanos v0.39.3-0.20250729120336-88d0ae8071cb h1:z/ePbn3lo/D4vdHGH8hpa2kgH9M6iLq0kOFtZwuelKM= -github.com/thanos-io/thanos v0.39.3-0.20250729120336-88d0ae8071cb/go.mod h1:gGUG3TDEoRSjTFVs/QO6QnQIILRgNF0P9l7BiiMfmHw= +github.com/thanos-io/thanos v0.40.1 h1:osjcmfQTXGoFeXTVLR56+RQo7bdtQmsNem73rflYwb0= +github.com/thanos-io/thanos v0.40.1/go.mod h1:TI/hLlnHK5XuHsycq5jNiQ9VrtjjejbG3lG2U3JJr1Y= github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww= github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tjhop/slog-gokit v0.1.4 h1:uj/vbDt3HaF0Py8bHPV4ti/s0utnO0miRbO277FLBKM= diff --git a/vendor/github.com/VictoriaMetrics/easyproto/LICENSE b/vendor/github.com/VictoriaMetrics/easyproto/LICENSE new file mode 100644 index 00000000000..c6b28e5afe6 --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/easyproto/LICENSE @@ -0,0 +1,190 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2023-2024 VictoriaMetrics, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/VictoriaMetrics/easyproto/README.md b/vendor/github.com/VictoriaMetrics/easyproto/README.md new file mode 100644 index 00000000000..f601a09517b --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/easyproto/README.md @@ -0,0 +1,225 @@ +[![GoDoc](https://godoc.org/github.com/VictoriaMetrics/easyproto?status.svg)](http://godoc.org/github.com/VictoriaMetrics/easyproto) + +# easyproto + +Package [github.com/VictoriaMetrics/easyproto](http://godoc.org/github.com/VictoriaMetrics/easyproto) provides simple building blocks +for marshaling and unmarshaling of [protobuf](https://protobuf.dev/) messages with [proto3 encoding](https://protobuf.dev/programming-guides/encoding/). + +## Features + +- There is no need for [protoc](https://grpc.io/docs/protoc-installation/) or [go generate](https://go.dev/blog/generate) - + just write simple, maintainable code for marshaling and unmarshaling protobuf messages. +- `easyproto` doesn't increase your binary size by tens of megabytes, as traditional `protoc`-compiled code may do. +- `easyproto` allows writing zero-alloc code for marshaling and unmarshaling of arbitrarily complex protobuf messages. See [examples](#examples). + +## Restrictions + +- It supports only [proto3 encoding](https://protobuf.dev/programming-guides/encoding/), so it doesn't support `proto2` encoding + features such as [proto2 groups](https://protobuf.dev/programming-guides/proto2/#groups).
+- It doesn't provide helpers for marshaling and unmarshaling of [well-known types](https://protobuf.dev/reference/protobuf/google.protobuf/), + since they are rarely needed in practice. + +## Examples + +Suppose you need marshaling and unmarshaling of the following `timeseries` message: + +```proto +message timeseries { + string name = 1; + repeated sample samples = 2; +} + +message sample { + double value = 1; + int64 timestamp = 2; +} +``` + +First, let's create the corresponding data structures in Go: + +```go +type Timeseries struct { + Name string + Samples []Sample +} + +type Sample struct { + Value float64 + Timestamp int64 +} +``` + +Since you write the code yourself, without any `go generate` or `protoc` invocations, +you are free to use arbitrary fields and methods in these structs. You can also specify the most suitable types for these fields. +For example, the `Sample` struct may be written as follows if you need the ability to detect missing values and timestamps: + +```go +type Sample struct { + Value *float64 + Timestamp *int64 +} +``` + +* [How to marshal `Timeseries` struct to protobuf message](#marshaling) +* [How to unmarshal protobuf message to `Timeseries` struct](#unmarshaling) + +### Marshaling + +The following code can be used for marshaling a `Timeseries` struct to a protobuf message: + +```go +import ( + "github.com/VictoriaMetrics/easyproto" +) + +// MarshalProtobuf marshals ts into protobuf message, appends this message to dst and returns the result. +// +// This function doesn't allocate memory on repeated calls. +func (ts *Timeseries) MarshalProtobuf(dst []byte) []byte { + m := mp.Get() + ts.marshalProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + mp.Put(m) + return dst +} + +func (ts *Timeseries) marshalProtobuf(mm *easyproto.MessageMarshaler) { + mm.AppendString(1, ts.Name) + for _, s := range ts.Samples { + s.marshalProtobuf(mm.AppendMessage(2)) + } +} + +func (s *Sample) marshalProtobuf(mm *easyproto.MessageMarshaler) { + mm.AppendDouble(1, s.Value) + mm.AppendInt64(2, s.Timestamp) +} + +var mp easyproto.MarshalerPool +``` + +Note that you are free to modify this code according to your needs, since you write and maintain it. +For example, you can construct arbitrary protobuf messages on the fly without the need to prepare the source struct for marshaling: + +```go +func CreateProtobufMessageOnTheFly() []byte { + // Dynamically construct timeseries message with 10 samples + var m easyproto.Marshaler + mm := m.MessageMarshaler() + mm.AppendString(1, "foo") + for i := 0; i < 10; i++ { + mmSample := mm.AppendMessage(2) + mmSample.AppendDouble(1, float64(i)/10) + mmSample.AppendInt64(2, int64(i)*1000) + } + return m.Marshal(nil) +} +``` + +This may be useful in tests. + +### Unmarshaling + +The following code can be used for unmarshaling the [`timeseries` message](#examples) into a `Timeseries` struct: + +```go +// UnmarshalProtobuf unmarshals ts from protobuf message at src. +func (ts *Timeseries) UnmarshalProtobuf(src []byte) (err error) { + // Set default Timeseries values + ts.Name = "" + ts.Samples = ts.Samples[:0] + + // Parse Timeseries message at src + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in Timeseries message") + } + switch fc.FieldNum { + case 1: + name, ok := fc.String() + if !ok { + return fmt.Errorf("cannot read Timeseries name") + } + // name refers to src. This means that the name changes when src changes.
+ // Make a copy with strings.Clone(name) if needed. + ts.Name = name + case 2: + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot read Timeseries sample data") + } + ts.Samples = append(ts.Samples, Sample{}) + s := &ts.Samples[len(ts.Samples)-1] + if err := s.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("cannot unmarshal sample: %w", err) + } + } + } + return nil +} + +// UnmarshalProtobuf unmarshals s from protobuf message at src. +func (s *Sample) UnmarshalProtobuf(src []byte) (err error) { + // Set default Sample values + s.Value = 0 + s.Timestamp = 0 + + // Parse Sample message at src + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in sample") + } + switch fc.FieldNum { + case 1: + value, ok := fc.Double() + if !ok { + return fmt.Errorf("cannot read sample value") + } + s.Value = value + case 2: + timestamp, ok := fc.Int64() + if !ok { + return fmt.Errorf("cannot read sample timestamp") + } + s.Timestamp = timestamp + } + } + return nil +} +``` + +You are free to modify this code according to your needs, since you wrote it and you maintain it. + +It is possible to extract the needed data from arbitrary protobuf messages without the need to create a destination struct. +For example, the following code extracts the `timeseries` name from a protobuf message, while ignoring all the other fields: + +```go +func GetTimeseriesName(src []byte) (name string, err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return "", fmt.Errorf("cannot read the next field") + } + if fc.FieldNum == 1 { + name, ok := fc.String() + if !ok { + return "", fmt.Errorf("cannot read timeseries name") + } + // Return a copy of name, since name refers to src. + return strings.Clone(name), nil + } + } + return "", fmt.Errorf("timeseries name isn't found in the message") +} +``` + +## Users + +`easyproto` is used in the following projects: + +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) diff --git a/vendor/github.com/VictoriaMetrics/easyproto/doc.go b/vendor/github.com/VictoriaMetrics/easyproto/doc.go new file mode 100644 index 00000000000..036f825878a --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/easyproto/doc.go @@ -0,0 +1,3 @@ +// Package easyproto provides building blocks for marshaling and unmarshaling protobuf v3 messages +// according to https://protobuf.dev/programming-guides/encoding/ . +package easyproto diff --git a/vendor/github.com/VictoriaMetrics/easyproto/reader.go b/vendor/github.com/VictoriaMetrics/easyproto/reader.go new file mode 100644 index 00000000000..c525c1f17af --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/easyproto/reader.go @@ -0,0 +1,739 @@ +package easyproto + +import ( + "encoding/binary" + "fmt" + "math" + "unsafe" +) + +// FieldContext represents a single protobuf-encoded field after a NextField() call. +type FieldContext struct { + // FieldNum is the number of the protobuf field read after a NextField() call. + FieldNum uint32 + + // wireType is the wire type for the given field + wireType wireType + + // data is protobuf-encoded field data for wireType=wireTypeLen + data []byte + + // intValue contains int value for wireType!=wireTypeLen + intValue uint64 +} + +// NextField reads the next field from protobuf-encoded src. +// +// It returns the tail left after reading the next field from src. +// +// It is unsafe to modify src while the FieldContext is in use.
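+// +// A field tag is the varint (fieldNum << 3) | wireType; see https://protobuf.dev/programming-guides/encoding/#structure.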
+func (fc *FieldContext) NextField(src []byte) ([]byte, error) { + if len(src) >= 2 { + n := uint16(src[0])<<8 | uint16(src[1]) + if (n&0x8080 == 0) && (n&0x0700 == (uint16(wireTypeLen) << 8)) { + // Fast path - read message with the length smaller than 0x80 bytes. + msgLen := int(n & 0xff) + src = src[2:] + if len(src) < msgLen { + return src, fmt.Errorf("cannot read field from %d bytes; need at least %d bytes", len(src), msgLen) + } + fc.FieldNum = uint32(n >> (8 + 3)) + fc.wireType = wireTypeLen + fc.data = src[:msgLen] + src = src[msgLen:] + return src, nil + } + } + + // Read field tag. See https://protobuf.dev/programming-guides/encoding/#structure + if len(src) == 0 { + return src, fmt.Errorf("cannot unmarshal field from empty message") + } + + var fieldNum uint64 + tag := uint64(src[0]) + if tag < 0x80 { + src = src[1:] + fieldNum = tag >> 3 + } else { + var offset int + tag, offset = binary.Uvarint(src) + if offset <= 0 { + return src, fmt.Errorf("cannot unmarshal field tag from uvarint") + } + src = src[offset:] + fieldNum = tag >> 3 + if fieldNum > math.MaxUint32 { + return src, fmt.Errorf("fieldNum=%d is bigger than uint32max=%d", fieldNum, uint64(math.MaxUint32)) + } + } + + wt := wireType(tag & 0x07) + + fc.FieldNum = uint32(fieldNum) + fc.wireType = wt + + // Read the remaining data + if wt == wireTypeLen { + u64, offset := binary.Uvarint(src) + if offset <= 0 { + return src, fmt.Errorf("cannot read message length for field #%d", fieldNum) + } + src = src[offset:] + if uint64(len(src)) < u64 { + return src, fmt.Errorf("cannot read data for field #%d from %d bytes; need at least %d bytes", fieldNum, len(src), u64) + } + fc.data = src[:u64] + src = src[u64:] + return src, nil + } + if wt == wireTypeVarint { + u64, offset := binary.Uvarint(src) + if offset <= 0 { + return src, fmt.Errorf("cannot read varint after field tag for field #%d", fieldNum) + } + src = src[offset:] + fc.intValue = u64 + return src, nil + } + if wt == wireTypeI64 { + if len(src) < 8 { + return src, fmt.Errorf("cannot read i64 for field #%d", fieldNum) + } + u64 := binary.LittleEndian.Uint64(src) + src = src[8:] + fc.intValue = u64 + return src, nil + } + if wt == wireTypeI32 { + if len(src) < 4 { + return src, fmt.Errorf("cannot read i32 for field #%d", fieldNum) + } + u32 := binary.LittleEndian.Uint32(src) + src = src[4:] + fc.intValue = uint64(u32) + return src, nil + } + return src, fmt.Errorf("unknown wireType=%d", wt) +} + +// UnmarshalMessageLen unmarshals protobuf message length from src. +// +// It returns the tail left after unmarshaling message length from src. +// +// It is expected that src is marshaled with Marshaler.MarshalWithLen(). +// +// False is returned if message length cannot be unmarshaled from src. +func UnmarshalMessageLen(src []byte) (int, []byte, bool) { + u64, offset := binary.Uvarint(src) + if offset <= 0 { + return 0, src, false + } + src = src[offset:] + if u64 > math.MaxInt32 { + return 0, src, false + } + return int(u64), src, true +} + +// wireType is the type of a protobuf-encoded field +// +// See https://protobuf.dev/programming-guides/encoding/#structure +type wireType byte + +const ( + // VARINT type - one of int32, int64, uint32, uint64, sint32, sint64, bool, enum + wireTypeVarint = wireType(0) + + // I64 type + wireTypeI64 = wireType(1) + + // Len type + wireTypeLen = wireType(2) + + // I32 type + wireTypeI32 = wireType(5) +) + +// Int32 returns int32 value for fc. +// +// False is returned if fc doesn't contain int32 value.
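+// For example, the varint field encoded as bytes 0x08 0x2a has fieldNum=1 and value 42, so Int32() returns (42, true).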
+func (fc *FieldContext) Int32() (int32, bool) { + if fc.wireType != wireTypeVarint { + return 0, false + } + return getInt32(fc.intValue) +} + +// Int64 returns int64 value for fc. +// +// False is returned if fc doesn't contain int64 value. +func (fc *FieldContext) Int64() (int64, bool) { + if fc.wireType != wireTypeVarint { + return 0, false + } + return int64(fc.intValue), true +} + +// Uint32 returns uint32 value for fc. +// +// False is returned if fc doesn't contain uint32 value. +func (fc *FieldContext) Uint32() (uint32, bool) { + if fc.wireType != wireTypeVarint { + return 0, false + } + return getUint32(fc.intValue) +} + +// Uint64 returns uint64 value for fc. +// +// False is returned if fc doesn't contain uint64 value. +func (fc *FieldContext) Uint64() (uint64, bool) { + if fc.wireType != wireTypeVarint { + return 0, false + } + return fc.intValue, true +} + +// Sint32 returns sint32 value for fc. +// +// False is returned if fc doesn't contain sint32 value. +func (fc *FieldContext) Sint32() (int32, bool) { + if fc.wireType != wireTypeVarint { + return 0, false + } + u32, ok := getUint32(fc.intValue) + if !ok { + return 0, false + } + i32 := decodeZigZagInt32(u32) + return i32, true +} + +// Sint64 returns sint64 value for fc. +// +// False is returned if fc doesn't contain sint64 value. +func (fc *FieldContext) Sint64() (int64, bool) { + if fc.wireType != wireTypeVarint { + return 0, false + } + i64 := decodeZigZagInt64(fc.intValue) + return i64, true +} + +// Bool returns bool value for fc. +// +// False is returned in the second result if fc doesn't contain bool value. +func (fc *FieldContext) Bool() (bool, bool) { + if fc.wireType != wireTypeVarint { + return false, false + } + return getBool(fc.intValue) +} + +// Fixed64 returns fixed64 value for fc. +// +// False is returned if fc doesn't contain fixed64 value. +func (fc *FieldContext) Fixed64() (uint64, bool) { + if fc.wireType != wireTypeI64 { + return 0, false + } + return fc.intValue, true +} + +// Sfixed64 returns sfixed64 value for fc. +// +// False is returned if fc doesn't contain sfixed64 value. +func (fc *FieldContext) Sfixed64() (int64, bool) { + if fc.wireType != wireTypeI64 { + return 0, false + } + return int64(fc.intValue), true +} + +// Double returns double value for fc. +// +// False is returned if fc doesn't contain double value. +func (fc *FieldContext) Double() (float64, bool) { + if fc.wireType != wireTypeI64 { + return 0, false + } + v := math.Float64frombits(fc.intValue) + return v, true +} + +// String returns string value for fc. +// +// The returned string is valid while the underlying buffer isn't changed. +// +// False is returned if fc doesn't contain string value. +func (fc *FieldContext) String() (string, bool) { + if fc.wireType != wireTypeLen { + return "", false + } + s := unsafeBytesToString(fc.data) + return s, true +} + +// Bytes returns bytes value for fc. +// +// The returned byte slice is valid while the underlying buffer isn't changed. +// +// False is returned if fc doesn't contain bytes value. +func (fc *FieldContext) Bytes() ([]byte, bool) { + if fc.wireType != wireTypeLen { + return nil, false + } + return fc.data, true +} + +// MessageData returns protobuf message data for fc. +// +// False is returned if fc doesn't contain message data. +func (fc *FieldContext) MessageData() ([]byte, bool) { + if fc.wireType != wireTypeLen { + return nil, false + } + return fc.data, true +} + +// Fixed32 returns fixed32 value for fc.
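+// The value is read from 4 little-endian bytes (wire type I32).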
+// +// False is returned if fc doesn't contain fixed32 value. +func (fc *FieldContext) Fixed32() (uint32, bool) { + if fc.wireType != wireTypeI32 { + return 0, false + } + u32 := mustGetUint32(fc.intValue) + return u32, true +} + +// Sfixed32 returns sfixed32 value for fc. +// +// False is returned if fc doesn't contain sfixed value. +func (fc *FieldContext) Sfixed32() (int32, bool) { + if fc.wireType != wireTypeI32 { + return 0, false + } + i32 := mustGetInt32(fc.intValue) + return i32, true +} + +// Float returns float value for fc. +// +// False is returned if fc doesn't contain float value. +func (fc *FieldContext) Float() (float32, bool) { + if fc.wireType != wireTypeI32 { + return 0, false + } + u32 := mustGetUint32(fc.intValue) + v := math.Float32frombits(u32) + return v, true +} + +// UnpackInt32s unpacks int32 values from fc, appends them to dst and returns the result. +// +// False is returned if fc doesn't contain int32 values. +func (fc *FieldContext) UnpackInt32s(dst []int32) ([]int32, bool) { + if fc.wireType == wireTypeVarint { + i32, ok := getInt32(fc.intValue) + if !ok { + return dst, false + } + dst = append(dst, i32) + return dst, true + } + if fc.wireType != wireTypeLen { + return dst, false + } + src := fc.data + dstOrig := dst + for len(src) > 0 { + u64, offset := binary.Uvarint(src) + if offset <= 0 { + return dstOrig, false + } + src = src[offset:] + i32, ok := getInt32(u64) + if !ok { + return dstOrig, false + } + dst = append(dst, i32) + } + return dst, true +} + +// UnpackInt64s unpacks int64 values from fc, appends them to dst and returns the result. +// +// False is returned if fc doesn't contain int64 values. +func (fc *FieldContext) UnpackInt64s(dst []int64) ([]int64, bool) { + if fc.wireType == wireTypeVarint { + dst = append(dst, int64(fc.intValue)) + return dst, true + } + if fc.wireType != wireTypeLen { + return dst, false + } + src := fc.data + dstOrig := dst + for len(src) > 0 { + u64, offset := binary.Uvarint(src) + if offset <= 0 { + return dstOrig, false + } + src = src[offset:] + dst = append(dst, int64(u64)) + } + return dst, true +} + +// UnpackUint32s unpacks uint32 values from fc, appends them to dst and returns the result. +// +// False is returned if fc doesn't contain uint32 values. +func (fc *FieldContext) UnpackUint32s(dst []uint32) ([]uint32, bool) { + if fc.wireType == wireTypeVarint { + u32, ok := getUint32(fc.intValue) + if !ok { + return dst, false + } + dst = append(dst, u32) + return dst, true + } + if fc.wireType != wireTypeLen { + return dst, false + } + src := fc.data + dstOrig := dst + for len(src) > 0 { + u64, offset := binary.Uvarint(src) + if offset <= 0 { + return dstOrig, false + } + src = src[offset:] + u32, ok := getUint32(u64) + if !ok { + return dstOrig, false + } + dst = append(dst, u32) + } + return dst, true +} + +// UnpackUint64s unpacks uint64 values from fc, appends them to dst and returns the result. +// +// False is returned if fc doesn't contain uint64 values. +func (fc *FieldContext) UnpackUint64s(dst []uint64) ([]uint64, bool) { + if fc.wireType == wireTypeVarint { + dst = append(dst, fc.intValue) + return dst, true + } + if fc.wireType != wireTypeLen { + return dst, false + } + src := fc.data + dstOrig := dst + for len(src) > 0 { + u64, offset := binary.Uvarint(src) + if offset <= 0 { + return dstOrig, false + } + src = src[offset:] + dst = append(dst, u64) + } + return dst, true +} + +// UnpackSint32s unpacks sint32 values from fc, appends them to dst and returns the result. 
+// +// False is returned if fc doesn't contain sint32 values. +func (fc *FieldContext) UnpackSint32s(dst []int32) ([]int32, bool) { + if fc.wireType == wireTypeVarint { + u32, ok := getUint32(fc.intValue) + if !ok { + return dst, false + } + i32 := decodeZigZagInt32(u32) + dst = append(dst, i32) + return dst, true + } + if fc.wireType != wireTypeLen { + return dst, false + } + src := fc.data + dstOrig := dst + for len(src) > 0 { + u64, offset := binary.Uvarint(src) + if offset <= 0 { + return dstOrig, false + } + src = src[offset:] + u32, ok := getUint32(u64) + if !ok { + return dstOrig, false + } + i32 := decodeZigZagInt32(u32) + dst = append(dst, i32) + } + return dst, true +} + +// UnpackSint64s unpacks sint64 values from fc, appends them to dst and returns the result. +// +// False is returned if fc doesn't contain sint64 values. +func (fc *FieldContext) UnpackSint64s(dst []int64) ([]int64, bool) { + if fc.wireType == wireTypeVarint { + i64 := decodeZigZagInt64(fc.intValue) + dst = append(dst, i64) + return dst, true + } + if fc.wireType != wireTypeLen { + return dst, false + } + src := fc.data + dstOrig := dst + for len(src) > 0 { + u64, offset := binary.Uvarint(src) + if offset <= 0 { + return dstOrig, false + } + src = src[offset:] + i64 := decodeZigZagInt64(u64) + dst = append(dst, i64) + } + return dst, true +} + +// UnpackBools unpacks bool values from fc, appends them to dst and returns the result. +// +// False is returned in the second result if fc doesn't contain bool values. +func (fc *FieldContext) UnpackBools(dst []bool) ([]bool, bool) { + if fc.wireType == wireTypeVarint { + v, ok := getBool(fc.intValue) + if !ok { + return dst, false + } + dst = append(dst, v) + return dst, true + } + if fc.wireType != wireTypeLen { + return dst, false + } + src := fc.data + dstOrig := dst + for len(src) > 0 { + u64, offset := binary.Uvarint(src) + if offset <= 0 { + return dstOrig, false + } + src = src[offset:] + v, ok := getBool(u64) + if !ok { + return dst, false + } + dst = append(dst, v) + } + return dst, true +} + +// UnpackFixed64s unpacks fixed64 values from fc, appends them to dst and returns the result. +// +// False is returned if fc doesn't contain fixed64 values. +func (fc *FieldContext) UnpackFixed64s(dst []uint64) ([]uint64, bool) { + if fc.wireType == wireTypeI64 { + u64 := fc.intValue + dst = append(dst, u64) + return dst, true + } + if fc.wireType != wireTypeLen { + return dst, false + } + src := fc.data + dstOrig := dst + for len(src) > 0 { + if len(src) < 8 { + return dstOrig, false + } + u64 := binary.LittleEndian.Uint64(src) + src = src[8:] + dst = append(dst, u64) + } + return dst, true +} + +// UnpackSfixed64s unpacks sfixed64 values from fc, appends them to dst and returns the result. +// +// False is returned if fc doesn't contain sfixed64 values. +func (fc *FieldContext) UnpackSfixed64s(dst []int64) ([]int64, bool) { + if fc.wireType == wireTypeI64 { + u64 := fc.intValue + dst = append(dst, int64(u64)) + return dst, true + } + if fc.wireType != wireTypeLen { + return dst, false + } + src := fc.data + dstOrig := dst + for len(src) > 0 { + if len(src) < 8 { + return dstOrig, false + } + u64 := binary.LittleEndian.Uint64(src) + src = src[8:] + dst = append(dst, int64(u64)) + } + return dst, true +} + +// UnpackDoubles unpacks double values from fc, appends them to dst and returns the result. +// +// False is returned if fc doesn't contain double values. 
+func (fc *FieldContext) UnpackDoubles(dst []float64) ([]float64, bool) { + if fc.wireType == wireTypeI64 { + v := math.Float64frombits(fc.intValue) + dst = append(dst, v) + return dst, true + } + if fc.wireType != wireTypeLen { + return dst, false + } + src := fc.data + dstOrig := dst + for len(src) > 0 { + if len(src) < 8 { + return dstOrig, false + } + u64 := binary.LittleEndian.Uint64(src) + src = src[8:] + v := math.Float64frombits(u64) + dst = append(dst, v) + } + return dst, true +} + +// UnpackFixed32s unpacks fixed32 values from fc, appends them to dst and returns the result. +// +// False is returned if fc doesn't contain fixed32 values. +func (fc *FieldContext) UnpackFixed32s(dst []uint32) ([]uint32, bool) { + if fc.wireType == wireTypeI32 { + u32 := mustGetUint32(fc.intValue) + dst = append(dst, u32) + return dst, true + } + if fc.wireType != wireTypeLen { + return dst, false + } + src := fc.data + dstOrig := dst + for len(src) > 0 { + if len(src) < 4 { + return dstOrig, false + } + u32 := binary.LittleEndian.Uint32(src) + src = src[4:] + dst = append(dst, u32) + } + return dst, true +} + +// UnpackSfixed32s unpacks sfixed32 values from fc, appends them to dst and returns the result. +// +// False is returned if fc doesn't contain sfixed32 values. +func (fc *FieldContext) UnpackSfixed32s(dst []int32) ([]int32, bool) { + if fc.wireType == wireTypeI32 { + i32 := mustGetInt32(fc.intValue) + dst = append(dst, i32) + return dst, true + } + if fc.wireType != wireTypeLen { + return dst, false + } + src := fc.data + dstOrig := dst + for len(src) > 0 { + if len(src) < 4 { + return dstOrig, false + } + u32 := binary.LittleEndian.Uint32(src) + src = src[4:] + dst = append(dst, int32(u32)) + } + return dst, true +} + +// UnpackFloats unpacks float values from fc, appends them to dst and returns the result. +// +// False is returned if fc doesn't contain float values. 
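+// Each float is decoded from 4 little-endian bytes via math.Float32frombits.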
+func (fc *FieldContext) UnpackFloats(dst []float32) ([]float32, bool) { + if fc.wireType == wireTypeI32 { + u32 := mustGetUint32(fc.intValue) + v := math.Float32frombits(u32) + dst = append(dst, v) + return dst, true + } + if fc.wireType != wireTypeLen { + return dst, false + } + src := fc.data + dstOrig := dst + for len(src) > 0 { + if len(src) < 4 { + return dstOrig, false + } + u32 := binary.LittleEndian.Uint32(src) + src = src[4:] + v := math.Float32frombits(u32) + dst = append(dst, v) + } + return dst, true +} + +func decodeZigZagInt64(u64 uint64) int64 { + return int64(u64>>1) ^ (int64(u64<<63) >> 63) +} + +func decodeZigZagInt32(u32 uint32) int32 { + return int32(u32>>1) ^ (int32(u32<<31) >> 31) +} + +func unsafeBytesToString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func getInt32(u64 uint64) (int32, bool) { + u32, ok := getUint32(u64) + if !ok { + return 0, false + } + return int32(u32), true +} + +func getUint32(u64 uint64) (uint32, bool) { + if u64 > math.MaxUint32 { + return 0, false + } + return uint32(u64), true +} + +func mustGetInt32(u64 uint64) int32 { + u32 := mustGetUint32(u64) + return int32(u32) +} + +func mustGetUint32(u64 uint64) uint32 { + u32, ok := getUint32(u64) + if !ok { + panic(fmt.Errorf("BUG: cannot get uint32 from %d", u64)) + } + return u32 +} + +func getBool(u64 uint64) (bool, bool) { + if u64 == 0 { + return false, true + } + if u64 == 1 { + return true, true + } + return false, false +} diff --git a/vendor/github.com/VictoriaMetrics/easyproto/writer.go b/vendor/github.com/VictoriaMetrics/easyproto/writer.go new file mode 100644 index 00000000000..6cbc9343e50 --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/easyproto/writer.go @@ -0,0 +1,718 @@ +package easyproto + +import ( + "encoding/binary" + "math" + "math/bits" + "sync" +) + +// MarshalerPool is a pool of Marshaler structs. +type MarshalerPool struct { + p sync.Pool +} + +// Get obtains a Marshaler from the pool. +// +// The returned Marshaler can be returned to the pool via Put after it is no longer needed. +func (mp *MarshalerPool) Get() *Marshaler { + v := mp.p.Get() + if v == nil { + return &Marshaler{} + } + return v.(*Marshaler) +} + +// Put returns the given m to the pool. +// +// m cannot be used after it is returned to the pool. +func (mp *MarshalerPool) Put(m *Marshaler) { + m.Reset() + mp.p.Put(m) +} + +// Marshaler helps marshal arbitrary protobuf messages. +// +// Construct the message with Append* functions on the MessageMarshaler() and then call Marshal* to marshal the constructed message. +// +// It is unsafe to use a single Marshaler instance from multiple concurrently running goroutines. +// +// It is recommended to recycle the Marshaler via MarshalerPool in order to reduce memory allocations. +type Marshaler struct { + // mm contains the root MessageMarshaler. + mm *MessageMarshaler + + // buf contains temporary data needed for marshaling the protobuf message. + buf []byte + + // fs contains fields for the currently marshaled message. + fs []field + + // mms contains MessageMarshaler structs for the currently marshaled message. + mms []MessageMarshaler +} + +// MessageMarshaler helps construct a protobuf message for marshaling. +// +// MessageMarshaler must be obtained via Marshaler.MessageMarshaler(). +type MessageMarshaler struct { + // m is the parent Marshaler for the given MessageMarshaler. + m *Marshaler + + // tag contains protobuf message tag for the given MessageMarshaler.
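+	// It is zero for the root marshaler and is set to makeTag(fieldNum, wireTypeLen) by AppendMessage for nested messages.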
+ tag uint64 + + // firstFieldIdx contains the index of the first field in the Marshaler.fs, which belongs to MessageMarshaler. + firstFieldIdx int + + // lastFieldIdx is the index of the last field in the Marshaler.fs, which belongs to MessageMarshaler. + lastFieldIdx int +} + +func (mm *MessageMarshaler) reset() { + mm.m = nil + mm.tag = 0 + mm.firstFieldIdx = -1 + mm.lastFieldIdx = -1 +} + +type field struct { + // messageSize is the size of marshaled protobuf message for the given field. + messageSize uint64 + + // dataStart is the start offset of field data at Marshaler.buf. + dataStart int + + // dataEnd is the end offset of field data at Marshaler.buf. + dataEnd int + + // nextFieldIdx contains an index of the next field in Marshaler.fs. + nextFieldIdx int + + // childMessageMarshalerIdx contains an index of child MessageMarshaler in Marshaler.mms. + childMessageMarshalerIdx int +} + +func (f *field) reset() { + f.messageSize = 0 + f.dataStart = 0 + f.dataEnd = 0 + f.nextFieldIdx = -1 + f.childMessageMarshalerIdx = -1 +} + +// Reset resets m, so it can be re-used. +func (m *Marshaler) Reset() { + m.mm = nil + m.buf = m.buf[:0] + + // There is no need to reset individual fields, since they are reset in newFieldIndex() + m.fs = m.fs[:0] + + // There is no need to reset individual MessageMarshaler items, since they are reset in newMessageMarshalerIndex() + m.mms = m.mms[:0] +} + +// MarshalWithLen marshals m, appends its length together with the marshaled m to dst and returns the result. +// +// That is, it appends a length-delimited protobuf message to dst. +// The length of the resulting message can be read via the UnmarshalMessageLen() function. +// +// See also Marshal. +func (m *Marshaler) MarshalWithLen(dst []byte) []byte { + if m.mm == nil { + dst = marshalVarUint64(dst, 0) + return dst + } + if firstFieldIdx := m.mm.firstFieldIdx; firstFieldIdx >= 0 { + f := &m.fs[firstFieldIdx] + messageSize := f.initMessageSize(m) + if cap(dst) == 0 { + dst = make([]byte, messageSize+10) + dst = dst[:0] + } + dst = marshalVarUint64(dst, messageSize) + dst = f.marshal(dst, m) + } + return dst +} + +// Marshal appends marshaled protobuf m to dst and returns the result. +// +// The marshaled message can be read via FieldContext.NextField(). +// +// See also MarshalWithLen. +func (m *Marshaler) Marshal(dst []byte) []byte { + if m.mm == nil { + // Nothing to marshal + return dst + } + if firstFieldIdx := m.mm.firstFieldIdx; firstFieldIdx >= 0 { + f := &m.fs[firstFieldIdx] + messageSize := f.initMessageSize(m) + if cap(dst) == 0 { + dst = make([]byte, messageSize) + dst = dst[:0] + } + dst = f.marshal(dst, m) + } + return dst +} + +// MessageMarshaler returns message marshaler for the given m. +func (m *Marshaler) MessageMarshaler() *MessageMarshaler { + if mm := m.mm; mm != nil { + return mm + } + idx := m.newMessageMarshalerIndex() + mm := &m.mms[idx] + m.mm = mm + return mm +} + +func (m *Marshaler) newMessageMarshalerIndex() int { + mms := m.mms + mmsLen := len(mms) + if cap(mms) > mmsLen { + mms = mms[:mmsLen+1] + } else { + mms = append(mms, MessageMarshaler{}) + } + m.mms = mms + mm := &mms[mmsLen] + mm.reset() + mm.m = m + return mmsLen +} + +func (m *Marshaler) newFieldIndex() int { + fs := m.fs + fsLen := len(fs) + if cap(fs) > fsLen { + fs = fs[:fsLen+1] + } else { + fs = append(fs, field{}) + } + m.fs = fs + fs[fsLen].reset() + return fsLen +} + +// AppendInt32 appends the given int32 value under the given fieldNum to mm.
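+// +// The value is varint-encoded as uint64(uint32(i32)), i.e. a negative value is not sign-extended to 64 bits.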
+func (mm *MessageMarshaler) AppendInt32(fieldNum uint32, i32 int32) { + mm.AppendUint64(fieldNum, uint64(uint32(i32))) +} + +// AppendInt64 appends the given int64 value under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendInt64(fieldNum uint32, i64 int64) { + mm.AppendUint64(fieldNum, uint64(i64)) +} + +// AppendUint32 appends the given uint32 value under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendUint32(fieldNum, u32 uint32) { + mm.AppendUint64(fieldNum, uint64(u32)) +} + +// AppendUint64 appends the given uint64 value under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendUint64(fieldNum uint32, u64 uint64) { + tag := makeTag(fieldNum, wireTypeVarint) + + m := mm.m + dst := m.buf + dstLen := len(dst) + if tag < 0x80 { + dst = append(dst, byte(tag)) + } else { + dst = marshalVarUint64(dst, tag) + } + dst = marshalVarUint64(dst, u64) + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +// AppendSint32 appends the given sint32 value under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendSint32(fieldNum uint32, i32 int32) { + u64 := uint64(encodeZigZagInt32(i32)) + mm.AppendUint64(fieldNum, u64) +} + +// AppendSint64 appends the given sint64 value under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendSint64(fieldNum uint32, i64 int64) { + u64 := encodeZigZagInt64(i64) + mm.AppendUint64(fieldNum, u64) +} + +// AppendBool appends the given bool value under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendBool(fieldNum uint32, v bool) { + u64 := uint64(0) + if v { + u64 = 1 + } + mm.AppendUint64(fieldNum, u64) +} + +// AppendFixed64 appends fixed64 value under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendFixed64(fieldNum uint32, u64 uint64) { + tag := makeTag(fieldNum, wireTypeI64) + + m := mm.m + dst := m.buf + dstLen := len(dst) + if tag < 0x80 { + dst = append(dst, byte(tag)) + } else { + dst = marshalVarUint64(dst, tag) + } + dst = marshalUint64(dst, u64) + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +// AppendSfixed64 appends sfixed64 value under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendSfixed64(fieldNum uint32, i64 int64) { + mm.AppendFixed64(fieldNum, uint64(i64)) +} + +// AppendDouble appends double value under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendDouble(fieldNum uint32, f float64) { + u64 := math.Float64bits(f) + mm.AppendFixed64(fieldNum, u64) +} + +// AppendString appends string value under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendString(fieldNum uint32, s string) { + tag := makeTag(fieldNum, wireTypeLen) + + m := mm.m + dst := m.buf + dstLen := len(dst) + sLen := len(s) + if tag < 0x80 && sLen < 0x80 { + dst = append(dst, byte(tag), byte(sLen)) + } else { + dst = marshalVarUint64(dst, tag) + dst = marshalVarUint64(dst, uint64(sLen)) + } + dst = append(dst, s...) + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +// AppendBytes appends bytes value under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendBytes(fieldNum uint32, b []byte) { + s := unsafeBytesToString(b) + mm.AppendString(fieldNum, s) +} + +// AppendMessage appends a protobuf message with the given fieldNum to mm. +// +// The function returns the MessageMarshaler for constructing the appended message.
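+// +// The length prefix of the nested message is computed lazily during Marshal, so fields may be appended to the returned marshaler after this call.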
+func (mm *MessageMarshaler) AppendMessage(fieldNum uint32) *MessageMarshaler { + tag := makeTag(fieldNum, wireTypeLen) + + f := mm.newField() + m := mm.m + f.childMessageMarshalerIdx = m.newMessageMarshalerIndex() + mmChild := &m.mms[f.childMessageMarshalerIdx] + mmChild.tag = tag + return mmChild +} + +// AppendFixed32 appends fixed32 value under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendFixed32(fieldNum, u32 uint32) { + tag := makeTag(fieldNum, wireTypeI32) + + m := mm.m + dst := m.buf + dstLen := len(dst) + if tag < 0x80 { + dst = append(dst, byte(tag)) + } else { + dst = marshalVarUint64(dst, tag) + } + dst = marshalUint32(dst, u32) + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +// AppendSfixed32 appends sfixed32 value under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendSfixed32(fieldNum uint32, i32 int32) { + mm.AppendFixed32(fieldNum, uint32(i32)) +} + +// AppendFloat appends float value under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendFloat(fieldNum uint32, f float32) { + u32 := math.Float32bits(f) + mm.AppendFixed32(fieldNum, u32) +} + +// AppendInt32s appends the given int32 values under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendInt32s(fieldNum uint32, i32s []int32) { + child := mm.AppendMessage(fieldNum) + child.appendInt32s(i32s) +} + +// AppendInt64s appends the given int64 values under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendInt64s(fieldNum uint32, i64s []int64) { + child := mm.AppendMessage(fieldNum) + child.appendInt64s(i64s) +} + +// AppendUint32s appends the given uint32 values under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendUint32s(fieldNum uint32, u32s []uint32) { + child := mm.AppendMessage(fieldNum) + child.appendUint32s(u32s) +} + +// AppendUint64s appends the given uint64 values under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendUint64s(fieldNum uint32, u64s []uint64) { + child := mm.AppendMessage(fieldNum) + child.appendUint64s(u64s) +} + +// AppendSint32s appends the given sint32 values under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendSint32s(fieldNum uint32, i32s []int32) { + child := mm.AppendMessage(fieldNum) + child.appendSint32s(i32s) +} + +// AppendSint64s appends the given sint64 values under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendSint64s(fieldNum uint32, i64s []int64) { + child := mm.AppendMessage(fieldNum) + child.appendSint64s(i64s) +} + +// AppendBools appends the given bool values under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendBools(fieldNum uint32, bs []bool) { + child := mm.AppendMessage(fieldNum) + child.appendBools(bs) +} + +// AppendFixed64s appends the given fixed64 values under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendFixed64s(fieldNum uint32, u64s []uint64) { + child := mm.AppendMessage(fieldNum) + child.appendFixed64s(u64s) +} + +// AppendSfixed64s appends the given sfixed64 values under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendSfixed64s(fieldNum uint32, i64s []int64) { + child := mm.AppendMessage(fieldNum) + child.appendSfixed64s(i64s) +} + +// AppendDoubles appends the given double values under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendDoubles(fieldNum uint32, fs []float64) { + child := mm.AppendMessage(fieldNum) + child.appendDoubles(fs) +} + +// AppendFixed32s appends the given fixed32 values under the given fieldNum to mm. 
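+// +// The values are emitted as a single length-delimited (packed) field with 4 little-endian bytes per value.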
+func (mm *MessageMarshaler) AppendFixed32s(fieldNum uint32, u32s []uint32) { + child := mm.AppendMessage(fieldNum) + child.appendFixed32s(u32s) +} + +// AppendSfixed32s appends the given sfixed32 values under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendSfixed32s(fieldNum uint32, i32s []int32) { + child := mm.AppendMessage(fieldNum) + child.appendSfixed32s(i32s) +} + +// AppendFloats appends the given float values under the given fieldNum to mm. +func (mm *MessageMarshaler) AppendFloats(fieldNum uint32, fs []float32) { + child := mm.AppendMessage(fieldNum) + child.appendFloats(fs) +} + +func (mm *MessageMarshaler) appendInt32s(i32s []int32) { + m := mm.m + dst := m.buf + dstLen := len(dst) + for _, i32 := range i32s { + dst = marshalVarUint64(dst, uint64(uint32(i32))) + } + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +func (mm *MessageMarshaler) appendUint32s(u32s []uint32) { + m := mm.m + dst := m.buf + dstLen := len(dst) + for _, u32 := range u32s { + dst = marshalVarUint64(dst, uint64(u32)) + } + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +func (mm *MessageMarshaler) appendSint32s(i32s []int32) { + m := mm.m + dst := m.buf + dstLen := len(dst) + for _, i32 := range i32s { + u64 := uint64(encodeZigZagInt32(i32)) + dst = marshalVarUint64(dst, u64) + } + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +func (mm *MessageMarshaler) appendInt64s(i64s []int64) { + m := mm.m + dst := m.buf + dstLen := len(dst) + for _, i64 := range i64s { + dst = marshalVarUint64(dst, uint64(i64)) + } + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +func (mm *MessageMarshaler) appendUint64s(u64s []uint64) { + m := mm.m + dst := m.buf + dstLen := len(dst) + for _, u64 := range u64s { + dst = marshalVarUint64(dst, u64) + } + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +func (mm *MessageMarshaler) appendSint64s(i64s []int64) { + m := mm.m + dst := m.buf + dstLen := len(dst) + for _, i64 := range i64s { + u64 := encodeZigZagInt64(i64) + dst = marshalVarUint64(dst, u64) + } + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +func (mm *MessageMarshaler) appendBools(bs []bool) { + m := mm.m + dst := m.buf + dstLen := len(dst) + for _, b := range bs { + u64 := uint64(0) + if b { + u64 = 1 + } + dst = marshalVarUint64(dst, u64) + } + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +func (mm *MessageMarshaler) appendFixed64s(u64s []uint64) { + m := mm.m + dst := m.buf + dstLen := len(dst) + for _, u64 := range u64s { + dst = marshalUint64(dst, u64) + } + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +func (mm *MessageMarshaler) appendSfixed64s(i64s []int64) { + m := mm.m + dst := m.buf + dstLen := len(dst) + for _, i64 := range i64s { + dst = marshalUint64(dst, uint64(i64)) + } + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +func (mm *MessageMarshaler) appendFixed32s(u32s []uint32) { + m := mm.m + dst := m.buf + dstLen := len(dst) + for _, u32 := range u32s { + dst = marshalUint32(dst, u32) + } + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +func (mm *MessageMarshaler) appendSfixed32s(i32s []int32) { + m := mm.m + dst := m.buf + dstLen := len(dst) + for _, i32 := range i32s { + dst = marshalUint32(dst, uint32(i32)) + } + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +func (mm *MessageMarshaler) appendDoubles(fs []float64) { + m := mm.m + dst := m.buf + dstLen := len(dst) + for _, f := range fs { + u64 := math.Float64bits(f) + dst = marshalUint64(dst, u64) + } + m.buf = dst + + 
mm.appendField(m, dstLen, len(dst)) +} + +func (mm *MessageMarshaler) appendFloats(fs []float32) { + m := mm.m + dst := m.buf + dstLen := len(dst) + for _, f := range fs { + u32 := math.Float32bits(f) + dst = marshalUint32(dst, u32) + } + m.buf = dst + + mm.appendField(m, dstLen, len(dst)) +} + +func (mm *MessageMarshaler) appendField(m *Marshaler, dataStart, dataEnd int) { + if lastFieldIdx := mm.lastFieldIdx; lastFieldIdx >= 0 { + if f := &m.fs[lastFieldIdx]; f.childMessageMarshalerIdx == -1 && f.dataEnd == dataStart { + f.dataEnd = dataEnd + return + } + } + f := mm.newField() + f.dataStart = dataStart + f.dataEnd = dataEnd +} + +func (mm *MessageMarshaler) newField() *field { + m := mm.m + idx := m.newFieldIndex() + f := &m.fs[idx] + if lastFieldIdx := mm.lastFieldIdx; lastFieldIdx >= 0 { + m.fs[lastFieldIdx].nextFieldIdx = idx + } else { + mm.firstFieldIdx = idx + } + mm.lastFieldIdx = idx + return f +} + +func (f *field) initMessageSize(m *Marshaler) uint64 { + n := uint64(0) + for { + if childMessageMarshalerIdx := f.childMessageMarshalerIdx; childMessageMarshalerIdx < 0 { + n += uint64(f.dataEnd - f.dataStart) + } else { + mmChild := m.mms[childMessageMarshalerIdx] + if tag := mmChild.tag; tag < 0x80 { + n++ + } else { + n += varuintLen(tag) + } + messageSize := uint64(0) + if firstFieldIdx := mmChild.firstFieldIdx; firstFieldIdx >= 0 { + messageSize = m.fs[firstFieldIdx].initMessageSize(m) + } + n += messageSize + if messageSize < 0x80 { + n++ + } else { + n += varuintLen(messageSize) + } + f.messageSize = messageSize + } + nextFieldIdx := f.nextFieldIdx + if nextFieldIdx < 0 { + return n + } + f = &m.fs[nextFieldIdx] + } +} + +func (f *field) marshal(dst []byte, m *Marshaler) []byte { + for { + if childMessageMarshalerIdx := f.childMessageMarshalerIdx; childMessageMarshalerIdx < 0 { + data := m.buf[f.dataStart:f.dataEnd] + dst = append(dst, data...) + } else { + mmChild := m.mms[childMessageMarshalerIdx] + tag := mmChild.tag + messageSize := f.messageSize + if tag < 0x80 && messageSize < 0x80 { + dst = append(dst, byte(tag), byte(messageSize)) + } else { + dst = marshalVarUint64(dst, mmChild.tag) + dst = marshalVarUint64(dst, f.messageSize) + } + if firstFieldIdx := mmChild.firstFieldIdx; firstFieldIdx >= 0 { + dst = m.fs[firstFieldIdx].marshal(dst, m) + } + } + nextFieldIdx := f.nextFieldIdx + if nextFieldIdx < 0 { + return dst + } + f = &m.fs[nextFieldIdx] + } +} + +func marshalUint64(dst []byte, u64 uint64) []byte { + return binary.LittleEndian.AppendUint64(dst, u64) +} + +func marshalUint32(dst []byte, u32 uint32) []byte { + return binary.LittleEndian.AppendUint32(dst, u32) +} + +func marshalVarUint64(dst []byte, u64 uint64) []byte { + if u64 < 0x80 { + // Fast path + dst = append(dst, byte(u64)) + return dst + } + for u64 > 0x7f { + dst = append(dst, 0x80|byte(u64)) + u64 >>= 7 + } + dst = append(dst, byte(u64)) + return dst +} + +func encodeZigZagInt64(i64 int64) uint64 { + return uint64((i64 << 1) ^ (i64 >> 63)) +} + +func encodeZigZagInt32(i32 int32) uint32 { + return uint32((i32 << 1) ^ (i32 >> 31)) +} + +func makeTag(fieldNum uint32, wt wireType) uint64 { + return (uint64(fieldNum) << 3) | uint64(wt) +} + +// varuintLen returns the number of bytes needed for varuint-encoding of u64. +// +// Note that it returns 0 for u64=0, so this case must be handled separately. 
+func varuintLen(u64 uint64) uint64 { + return uint64(((byte(bits.Len64(u64))) + 6) / 7) +} diff --git a/vendor/github.com/thanos-io/objstore/CHANGELOG.md b/vendor/github.com/thanos-io/objstore/CHANGELOG.md index 6fee1ccd411..120720817ff 100644 --- a/vendor/github.com/thanos-io/objstore/CHANGELOG.md +++ b/vendor/github.com/thanos-io/objstore/CHANGELOG.md @@ -15,6 +15,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re - [#157](https://github.com/thanos-io/objstore/pull/157) Azure: Add `az_tenant_id`, `client_id` and `client_secret` configs. ### Fixed +- [#196](https://github.com/thanos-io/objstore/pull/196) GCS: fix error check in Exists method when object does not exist. - [#153](https://github.com/thanos-io/objstore/pull/153) Metrics: Fix `objstore_bucket_operation_duration_seconds_*` for `get` and `get_range` operations. - [#141](https://github.com/thanos-io/objstore/pull/142) S3: Fix missing encryption configuration for `Bucket.Exists()` and `Bucket.Attributes()` calls. - [#117](https://github.com/thanos-io/objstore/pull/117) Metrics: Fix `objstore_bucket_operation_failures_total` incorrectly incremented if context is cancelled while reading object contents. diff --git a/vendor/github.com/thanos-io/objstore/inmem.go b/vendor/github.com/thanos-io/objstore/inmem.go index ff27321ad7f..ec74d03f8e2 100644 --- a/vendor/github.com/thanos-io/objstore/inmem.go +++ b/vendor/github.com/thanos-io/objstore/inmem.go @@ -34,6 +34,24 @@ func NewInMemBucket() *InMemBucket { } } +// ChangeLastModified changes the last modified timestamp of the object at the given path. +// If the object does not exist, it returns an error. +// This method is useful for testing purposes to simulate updates to objects. +func (b *InMemBucket) ChangeLastModified(path string, lastModified time.Time) error { + b.mtx.Lock() + defer b.mtx.Unlock() + + if _, ok := b.objects[path]; !ok { + return errNotFound + } + + attrs := b.attrs[path] + attrs.LastModified = lastModified + b.attrs[path] = attrs + + return nil +} + func (b *InMemBucket) Provider() ObjProvider { return MEMORY } // Objects returns a copy of the internally stored objects. 
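// ---------------------------------------------------------------------------
// Editor's aside (illustrative sketch, not part of the vendored patch): the
// easyproto writer above hand-rolls the protobuf wire format. The snippet
// below mirrors what marshalVarUint64, encodeZigZagInt64 and varuintLen do in
// that hunk and round-trips one zigzag-encoded varint; all names here are
// local to the example.
package main

import (
	"fmt"
	"math/bits"
)

// putVarUint64 appends u64 as a base-128 varint: 7 payload bits per byte,
// continuation bit set on every byte except the last.
func putVarUint64(dst []byte, u64 uint64) []byte {
	for u64 > 0x7f {
		dst = append(dst, 0x80|byte(u64))
		u64 >>= 7
	}
	return append(dst, byte(u64))
}

// zigZag64 interleaves signed values so small negatives stay short:
// 0, -1, 1, -2 map to 0, 1, 2, 3.
func zigZag64(i64 int64) uint64 { return uint64((i64 << 1) ^ (i64 >> 63)) }

// unZigZag64 inverts zigZag64.
func unZigZag64(u64 uint64) int64 { return int64(u64>>1) ^ -int64(u64&1) }

func main() {
	v := int64(-300)
	enc := putVarUint64(nil, zigZag64(v))

	// varuintLen in the hunk computes the same length without encoding;
	// as its doc comment warns, it yields 0 for input 0, which callers
	// special-case with the tag/size fast paths.
	fmt.Println(len(enc), (bits.Len64(zigZag64(v))+6)/7) // 2 2

	// Decode the varint back and undo the zigzag mapping.
	var u uint64
	for i, shift := 0, 0; ; i, shift = i+1, shift+7 {
		b := enc[i]
		u |= uint64(b&0x7f) << shift
		if b < 0x80 {
			break
		}
	}
	fmt.Println(unZigZag64(u)) // -300
}
// ---------------------------------------------------------------------------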
diff --git a/vendor/github.com/thanos-io/objstore/objstore.go b/vendor/github.com/thanos-io/objstore/objstore.go index bdbb52a390f..f6bcac6cab4 100644 --- a/vendor/github.com/thanos-io/objstore/objstore.go +++ b/vendor/github.com/thanos-io/objstore/objstore.go @@ -591,7 +591,6 @@ func wrapWithMetrics(b Bucket, metrics *Metrics) *metricBucket { bkt.metrics.ops.WithLabelValues(op) bkt.metrics.opsFailures.WithLabelValues(op) bkt.metrics.opsDuration.WithLabelValues(op) - bkt.metrics.opsFetchedBytes.WithLabelValues(op) } // fetched bytes only relevant for get, getrange and upload @@ -600,6 +599,7 @@ func wrapWithMetrics(b Bucket, metrics *Metrics) *metricBucket { OpGetRange, OpUpload, } { + bkt.metrics.opsFetchedBytes.WithLabelValues(op) bkt.metrics.opsTransferredBytes.WithLabelValues(op) } return bkt diff --git a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go index 484e33a9168..b8723b8ca04 100644 --- a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go +++ b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go @@ -325,7 +325,7 @@ func (b *Bucket) Handle() *storage.BucketHandle { func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) { if _, err := b.bkt.Object(name).Attrs(ctx); err == nil { return true, nil - } else if err != storage.ErrObjectNotExist { + } else if !b.IsObjNotFoundErr(err) { return false, err } return false, nil diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/block.go b/vendor/github.com/thanos-io/thanos/pkg/block/block.go index 238d3ea7ae1..29e3abf4bdd 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/block.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/block.go @@ -141,39 +141,29 @@ func upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st return errors.Wrap(err, "gather meta file stats") } - if err := meta.Write(&metaEncoded); err != nil { - return errors.Wrap(err, "encode meta file") - } - if err := objstore.UploadDir(ctx, logger, bkt, filepath.Join(bdir, ChunksDirname), path.Join(id.String(), ChunksDirname), options...); err != nil { - return cleanUp(logger, bkt, id, errors.Wrap(err, "upload chunks")) + return errors.Wrap(err, "upload chunks") } if err := objstore.UploadFile(ctx, logger, bkt, filepath.Join(bdir, IndexFilename), path.Join(id.String(), IndexFilename)); err != nil { - return cleanUp(logger, bkt, id, errors.Wrap(err, "upload index")) + return errors.Wrap(err, "upload index") + } + + meta.Thanos.UploadTime = time.Now().UTC() + if err := meta.Write(&metaEncoded); err != nil { + return errors.Wrap(err, "encode meta file") } // Meta.json always need to be uploaded as a last item. This will allow to assume block directories without meta file to be pending uploads. if err := bkt.Upload(ctx, path.Join(id.String(), MetaFilename), strings.NewReader(metaEncoded.String())); err != nil { - // Don't call cleanUp here. Despite getting error, meta.json may have been uploaded in certain cases, - // and even though cleanUp will not see it yet, meta.json may appear in the bucket later. - // (Eg. S3 is known to behave this way when it returns 503 "SlowDown" error). - // If meta.json is not uploaded, this will produce partial blocks, but such blocks will be cleaned later. + // Syncer always checks if meta.json exists in the next iteration and will retry if it does not. + // This is to avoid partial uploads. 
return errors.Wrap(err, "upload meta file") } return nil } -func cleanUp(logger log.Logger, bkt objstore.Bucket, id ulid.ULID, err error) error { - // Cleanup the dir with an uncancelable context. - cleanErr := Delete(context.Background(), logger, bkt, id) - if cleanErr != nil { - return errors.Wrapf(err, "failed to clean block after upload issue. Partial block in system. Err: %s", cleanErr.Error()) - } - return err -} - // MarkForDeletion creates a file which stores information about when the block was marked for deletion. func MarkForDeletion(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, details string, markedForDeletion prometheus.Counter) error { deletionMarkFile := path.Join(id.String(), metadata.DeletionMarkFilename) diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go index d8882868ae6..a3b319697e8 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go @@ -7,6 +7,7 @@ import ( "context" "encoding/json" "io" + "maps" "os" "path" "path/filepath" @@ -265,7 +266,7 @@ func (f *ConcurrentLister) GetActiveAndPartialBlockIDs(ctx context.Context, acti eg, gCtx = errgroup.WithContext(ctx) mu sync.Mutex ) - for i := 0; i < concurrency; i++ { + for range concurrency { eg.Go(func() error { for uid := range metaChan { // TODO(bwplotka): If that causes problems (obj store rate limits), add longer ttl to cached items. @@ -410,12 +411,12 @@ func NewMetaFetcherWithMetrics(logger log.Logger, concurrency int, bkt objstore. } // NewMetaFetcher transforms BaseFetcher into actually usable *MetaFetcher. -func (f *BaseFetcher) NewMetaFetcher(reg prometheus.Registerer, filters []MetadataFilter, logTags ...interface{}) *MetaFetcher { +func (f *BaseFetcher) NewMetaFetcher(reg prometheus.Registerer, filters []MetadataFilter, logTags ...any) *MetaFetcher { return f.NewMetaFetcherWithMetrics(NewFetcherMetrics(reg, nil, nil), filters, logTags...) } // NewMetaFetcherWithMetrics transforms BaseFetcher into actually usable *MetaFetcher. -func (f *BaseFetcher) NewMetaFetcherWithMetrics(fetcherMetrics *FetcherMetrics, filters []MetadataFilter, logTags ...interface{}) *MetaFetcher { +func (f *BaseFetcher) NewMetaFetcherWithMetrics(fetcherMetrics *FetcherMetrics, filters []MetadataFilter, logTags ...any) *MetaFetcher { return &MetaFetcher{metrics: fetcherMetrics, wrapped: f, filters: filters, logger: log.With(f.logger, logTags...)} } @@ -516,7 +517,7 @@ type response struct { corruptedMetas float64 } -func (f *BaseFetcher) fetchMetadata(ctx context.Context) (interface{}, error) { +func (f *BaseFetcher) fetchMetadata(ctx context.Context) (any, error) { f.syncs.Inc() var ( @@ -606,9 +607,7 @@ func (f *BaseFetcher) fetchMetadata(ctx context.Context) (interface{}, error) { } modifiedTimestamps := make(map[ulid.ULID]time.Time, len(resp.modifiedTimestamps)) - for id, ts := range resp.modifiedTimestamps { - modifiedTimestamps[id] = ts - } + maps.Copy(modifiedTimestamps, resp.modifiedTimestamps) f.mtx.Lock() f.cached = cached @@ -658,7 +657,7 @@ func (f *BaseFetcher) fetch(ctx context.Context, metrics *FetcherMetrics, filter // Run this in thread safe run group. // TODO(bwplotka): Consider custom singleflight with ttl. - v, err := f.g.Do("", func() (i interface{}, err error) { + v, err := f.g.Do("", func() (i any, err error) { // NOTE: First go routine context will go through. 
return f.fetchMetadata(ctx) }) @@ -669,9 +668,7 @@ func (f *BaseFetcher) fetch(ctx context.Context, metrics *FetcherMetrics, filter // Copy as same response might be reused by different goroutines. metas := make(map[ulid.ULID]*metadata.Meta, len(resp.metas)) - for id, m := range resp.metas { - metas[id] = m - } + maps.Copy(metas, resp.metas) metrics.Synced.WithLabelValues(FailedMeta).Set(float64(len(resp.metaErrs))) metrics.Synced.WithLabelValues(NoMeta).Set(resp.noMetas) @@ -698,7 +695,7 @@ func (f *BaseFetcher) countCached() int { f.mtx.Lock() defer f.mtx.Unlock() var i int - f.cached.Range(func(_, _ interface{}) bool { + f.cached.Range(func(_, _ any) bool { i++ return true }) @@ -824,20 +821,32 @@ func NewDeduplicateFilter(concurrency int) *DefaultDeduplicateFilter { // Filter filters out duplicate blocks that can be formed // from two or more overlapping blocks that fully submatches the source blocks of the older blocks. func (f *DefaultDeduplicateFilter) Filter(_ context.Context, metas map[ulid.ULID]*metadata.Meta, synced GaugeVec, modified GaugeVec) error { - f.duplicateIDs = f.duplicateIDs[:0] - - var wg sync.WaitGroup + var filterWg, dupWg sync.WaitGroup var groupChan = make(chan []*metadata.Meta) + var dupsChan = make(chan ulid.ULID) + + dupWg.Go(func() { + dups := make([]ulid.ULID, 0) + for dup := range dupsChan { + if metas[dup] != nil { + dups = append(dups, dup) + } + synced.WithLabelValues(duplicateMeta).Inc() + delete(metas, dup) + } + f.mu.Lock() + f.duplicateIDs = dups + f.mu.Unlock() + }) + // Start up workers to deduplicate workgroups when they're ready. for i := 0; i < f.concurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() + filterWg.Go(func() { for group := range groupChan { - f.filterGroup(group, metas, synced) + f.filterGroup(group, dupsChan) } - }() + }) } // We need only look within a compaction group for duplicates, so splitting by group key gives us parallelizable streams. @@ -850,12 +859,15 @@ func (f *DefaultDeduplicateFilter) Filter(_ context.Context, metas map[ulid.ULID groupChan <- group } close(groupChan) - wg.Wait() + filterWg.Wait() + + close(dupsChan) + dupWg.Wait() return nil } -func (f *DefaultDeduplicateFilter) filterGroup(metaSlice []*metadata.Meta, metas map[ulid.ULID]*metadata.Meta, synced GaugeVec) { +func (f *DefaultDeduplicateFilter) filterGroup(metaSlice []*metadata.Meta, dupsChan chan ulid.ULID) { sort.Slice(metaSlice, func(i, j int) bool { ilen := len(metaSlice[i].Compaction.Sources) jlen := len(metaSlice[j].Compaction.Sources) @@ -886,19 +898,16 @@ childLoop: coveringSet = append(coveringSet, child) } - f.mu.Lock() for _, duplicate := range duplicates { - if metas[duplicate] != nil { - f.duplicateIDs = append(f.duplicateIDs, duplicate) - } - synced.WithLabelValues(duplicateMeta).Inc() - delete(metas, duplicate) + dupsChan <- duplicate } - f.mu.Unlock() } // DuplicateIDs returns slice of block ids that are filtered out by DefaultDeduplicateFilter. 
 func (f *DefaultDeduplicateFilter) DuplicateIDs() []ulid.ULID {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
 	return f.duplicateIDs
 }
 
@@ -941,9 +950,7 @@ func (r *ReplicaLabelRemover) Filter(_ context.Context, metas map[ulid.ULID]*met
 	countReplicaLabelRemoved := make(map[string]int, len(metas))
 	for u, meta := range metas {
 		l := make(map[string]string)
-		for n, v := range meta.Thanos.Labels {
-			l[n] = v
-		}
+		maps.Copy(l, meta.Thanos.Labels)
 
 		for _, replicaLabel := range r.replicaLabels {
 			if _, exists := l[replicaLabel]; exists {
@@ -1001,10 +1008,18 @@ func NewConsistencyDelayMetaFilterWithoutMetrics(logger log.Logger, consistencyD
 // Filter filters out blocks that were created or uploaded within the specified consistency delay.
 func (f *ConsistencyDelayMetaFilter) Filter(_ context.Context, metas map[ulid.ULID]*metadata.Meta, synced GaugeVec, modified GaugeVec) error {
 	for id, meta := range metas {
+		var metaUploadTime = meta.Thanos.UploadTime
+
+		var tooFresh bool
+		if !metaUploadTime.IsZero() {
+			tooFresh = time.Since(metaUploadTime) < f.consistencyDelay
+		} else {
+			tooFresh = ulid.Now()-id.Time() < uint64(f.consistencyDelay/time.Millisecond)
+		}
+
 		// TODO(khyatisoneji): Remove the checks about Thanos Source
 		// by implementing delete delay to fetch metas.
-		// TODO(bwplotka): Check consistency delay based on file upload / modification time instead of ULID.
-		if ulid.Now()-id.Time() < uint64(f.consistencyDelay/time.Millisecond) &&
+		if tooFresh &&
 			meta.Thanos.Source != metadata.BucketRepairSource &&
 			meta.Thanos.Source != metadata.CompactorSource &&
 			meta.Thanos.Source != metadata.CompactorRepairSource {
@@ -1048,9 +1063,7 @@ func (f *IgnoreDeletionMarkFilter) DeletionMarkBlocks() map[ulid.ULID]*metadata.
 	defer f.mtx.Unlock()
 
 	deletionMarkMap := make(map[ulid.ULID]*metadata.DeletionMark, len(f.deletionMarkMap))
-	for id, meta := range f.deletionMarkMap {
-		deletionMarkMap[id] = meta
-	}
+	maps.Copy(deletionMarkMap, f.deletionMarkMap)
 
 	return deletionMarkMap
 }
@@ -1068,11 +1081,16 @@ func (f *IgnoreDeletionMarkFilter) Filter(ctx context.Context, metas map[ulid.UL
 	}
 
 	var (
-		eg  errgroup.Group
-		ch  = make(chan ulid.ULID, f.concurrency)
-		mtx sync.Mutex
+		eg             errgroup.Group
+		ch             = make(chan ulid.ULID, f.concurrency)
+		mtx            sync.Mutex
+		preFilterMetas = make(map[ulid.ULID]struct{}, len(metas))
 	)
 
+	for k := range metas {
+		preFilterMetas[k] = struct{}{}
+	}
+
 	for i := 0; i < f.concurrency; i++ {
 		eg.Go(func() error {
 			var lastErr error
@@ -1127,7 +1145,19 @@ func (f *IgnoreDeletionMarkFilter) Filter(ctx context.Context, metas map[ulid.UL
 	}
 
 	f.mtx.Lock()
-	f.deletionMarkMap = deletionMarkMap
+	if f.deletionMarkMap == nil {
+		f.deletionMarkMap = make(map[ulid.ULID]*metadata.DeletionMark)
+	}
+	maps.Copy(f.deletionMarkMap, deletionMarkMap)
+
+	for u := range f.deletionMarkMap {
+		if _, exists := preFilterMetas[u]; exists {
+			continue
+		}
+
+		delete(f.deletionMarkMap, u)
+	}
+
 	f.mtx.Unlock()
 
 	return nil
@@ -1180,7 +1210,7 @@ func (f *ParquetMigratedMetaFilter) Filter(_ context.Context, metas map[ulid.ULI
 			continue
 		}
 
-		extensionsMap, ok := meta.Thanos.Extensions.(map[string]interface{})
+		extensionsMap, ok := meta.Thanos.Extensions.(map[string]any)
 		if !ok {
 			continue
 		}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go
index 3206362b969..3c0b07d03a3 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go
+++
b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go @@ -132,7 +132,7 @@ func (p *ReaderPool) NewBinaryReader(ctx context.Context, logger log.Logger, bkt } idBytes := id.Bytes() - lazyReader, err, _ := p.lazyReadersSF.Do(*(*string)(unsafe.Pointer(&idBytes)), func() (interface{}, error) { + lazyReader, err, _ := p.lazyReadersSF.Do(*(*string)(unsafe.Pointer(&idBytes)), func() (any, error) { return NewLazyBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.metrics.lazyReader, p.metrics.binaryReader, p.onLazyReaderClosed, p.lazyDownloadFunc(meta)) }) diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go index a0706743f86..63a56202d17 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go @@ -14,6 +14,7 @@ import ( "io" "os" "path/filepath" + "time" "github.com/go-kit/log" "github.com/oklog/ulid/v2" @@ -103,6 +104,10 @@ type Thanos struct { // Extensions are used for plugin any arbitrary additional information for block. Optional. Extensions any `json:"extensions,omitempty"` + + // UploadTime is used to track when the meta.json file was uploaded to the object storage + // without an extra Attributes call. Used for consistency filter. + UploadTime time.Time `json:"upload_time,omitempty"` } type IndexStats struct { diff --git a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/async_op.go b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/async_op.go index fb468a5a78f..0842881e97f 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/async_op.go +++ b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/async_op.go @@ -32,7 +32,7 @@ func NewAsyncOperationProcessor(bufferSize, concurrency int) *AsyncOperationProc } p.workers.Add(concurrency) - for i := 0; i < concurrency; i++ { + for range concurrency { go p.asyncQueueProcessLoop() } diff --git a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/cacheutil.go b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/cacheutil.go index 8f6dea7b7e9..22f3b41589f 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/cacheutil.go +++ b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/cacheutil.go @@ -38,10 +38,7 @@ func doWithBatch(ctx context.Context, totalSize int, batchSize int, ga gate.Gate } g, ctx := errgroup.WithContext(ctx) for i := 0; i < totalSize; i += batchSize { - j := i + batchSize - if j > totalSize { - j = totalSize - } + j := min(i+batchSize, totalSize) if ga != nil { if err := ga.Start(ctx); err != nil { return nil diff --git a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/redis_client.go b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/redis_client.go index 09e664b4dcf..35059f07d4c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/redis_client.go +++ b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/redis_client.go @@ -198,10 +198,7 @@ func NewRedisClientWithConfig(logger log.Logger, name string, config RedisClient tlsConfig = tlsClientConfig } - clientSideCacheDisabled := false - if config.CacheSize == 0 { - clientSideCacheDisabled = true - } + clientSideCacheDisabled := config.CacheSize == 0 clientOpts := rueidis.ClientOption{ InitAddress: strings.Split(config.Addr, ","), diff --git a/vendor/github.com/thanos-io/thanos/pkg/clientconfig/config.go b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/config.go index 9de1b4f5808..38565f6724a 100644 --- 
a/vendor/github.com/thanos-io/thanos/pkg/clientconfig/config.go +++ b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/config.go @@ -36,7 +36,7 @@ func DefaultConfig() Config { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Config) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultConfig() type plain Config return unmarshal((*plain)(c)) diff --git a/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go index e7e83ef5b33..5c3f4038064 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go +++ b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go @@ -7,6 +7,7 @@ import ( "context" "crypto/tls" "fmt" + "maps" "net" "net/http" "net/url" @@ -290,9 +291,7 @@ func (u userAgentRoundTripper) RoundTrip(r *http.Request) (*http.Response, error r2 := new(http.Request) *r2 = *r r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } + maps.Copy(r2.Header, r.Header) r2.Header.Set("User-Agent", u.name) r = r2 } diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/blocks_cleaner.go b/vendor/github.com/thanos-io/thanos/pkg/compact/blocks_cleaner.go index 5ae9120a8cb..9b525380ba9 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/blocks_cleaner.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/blocks_cleaner.go @@ -9,6 +9,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/oklog/ulid/v2" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/thanos-io/objstore" @@ -40,21 +41,24 @@ func NewBlocksCleaner(logger log.Logger, bkt objstore.Bucket, ignoreDeletionMark // DeleteMarkedBlocks uses ignoreDeletionMarkFilter to gather the blocks that are marked for deletion and deletes those // if older than given deleteDelay. 
-func (s *BlocksCleaner) DeleteMarkedBlocks(ctx context.Context) error {
+func (s *BlocksCleaner) DeleteMarkedBlocks(ctx context.Context) (map[ulid.ULID]struct{}, error) {
 	level.Info(s.logger).Log("msg", "started cleaning of blocks marked for deletion")
 
+	deletedBlocks := make(map[ulid.ULID]struct{}, 0)
+
 	deletionMarkMap := s.ignoreDeletionMarkFilter.DeletionMarkBlocks()
 	for _, deletionMark := range deletionMarkMap {
 		if time.Since(time.Unix(deletionMark.DeletionTime, 0)).Seconds() > s.deleteDelay.Seconds() {
 			if err := block.Delete(ctx, s.logger, s.bkt, deletionMark.ID); err != nil {
 				s.blockCleanupFailures.Inc()
-				return errors.Wrap(err, "delete block")
+				return deletedBlocks, errors.Wrap(err, "delete block")
 			}
 			s.blocksCleaned.Inc()
 			level.Info(s.logger).Log("msg", "deleted block marked for deletion", "block", deletionMark.ID)
+			deletedBlocks[deletionMark.ID] = struct{}{}
 		}
 	}
 
 	level.Info(s.logger).Log("msg", "cleaning of blocks marked for deletion done")
-	return nil
+	return deletedBlocks, nil
 }
diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/clean.go b/vendor/github.com/thanos-io/thanos/pkg/compact/clean.go
index c8a0b88017f..19274c87777 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/compact/clean.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/compact/clean.go
@@ -5,6 +5,7 @@ package compact
 
 import (
 	"context"
+	"fmt"
 	"time"
 
 	"github.com/go-kit/log"
@@ -12,9 +13,11 @@ import (
 	"github.com/oklog/ulid/v2"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/thanos-io/objstore"
 
 	"github.com/thanos-io/thanos/pkg/block"
+	"github.com/thanos-io/thanos/pkg/block/metadata"
 )
 
 const (
@@ -23,6 +26,34 @@ const (
 	PartialUploadThresholdAge = 2 * 24 * time.Hour
 )
 
+// getOldestModifiedTime returns the most recent last-modified time among the block's objects,
+// i.e. the last time anything in the block was written. If the timestamps cannot be fetched,
+// it falls back to the time encoded in the block's ULID.
+func getOldestModifiedTime(ctx context.Context, blockID ulid.ULID, bkt objstore.Bucket) (time.Time, error) {
+	var lastModifiedTime time.Time
+
+	err := bkt.IterWithAttributes(ctx, blockID.String(), func(attrs objstore.IterObjectAttributes) error {
+		lm, ok := attrs.LastModified()
+		if !ok {
+			return nil
+		}
+		if lm.After(lastModifiedTime) {
+			lastModifiedTime = lm
+		}
+		return nil
+	}, objstore.WithUpdatedAt(), objstore.WithRecursiveIter())
+
+	if err != nil {
+		return timestamp.Time(int64(blockID.Time())), err
+	}
+
+	if lastModifiedTime.IsZero() {
+		return timestamp.Time(int64(blockID.Time())), fmt.Errorf("no last modified time found for block %s, using block creation time instead", blockID.String())
+	}
+
+	return lastModifiedTime, nil
+}
+
 func BestEffortCleanAbortedPartialUploads(
 	ctx context.Context,
 	logger log.Logger,
@@ -31,26 +62,29 @@ func BestEffortCleanAbortedPartialUploads(
 	deleteAttempts prometheus.Counter,
 	blockCleanups prometheus.Counter,
 	blockCleanupFailures prometheus.Counter,
+	deletionMarkBlocks map[ulid.ULID]*metadata.DeletionMark,
 ) {
 	level.Info(logger).Log("msg", "started cleaning of aborted partial uploads")
 
-	// Delete partial blocks that are older than partialUploadThresholdAge.
-	// TODO(bwplotka): This is can cause data loss if blocks are:
-	// * being uploaded longer than partialUploadThresholdAge
-	// * being uploaded and started after their partialUploadThresholdAge
-	// can be assumed in this case. Keep partialUploadThresholdAge long for now.
- // Mitigate this by adding ModifiedTime to bkt and check that instead of ULID (block creation time). for id := range partial { - if ulid.Now()-id.Time() <= uint64(PartialUploadThresholdAge/time.Millisecond) { - // Minimum delay has not expired, ignore for now. + // NOTE(GiedriusS): we start to delete blocks from meta.json so at that point they are marked as partial. + // If you have multiple compactor shards then they might try to delete the same block here. + // We do not want to delete blocks that are marked for deletion, as they are already scheduled for deletion in the blocks cleaner. + if _, ok := deletionMarkBlocks[id]; ok { + level.Debug(logger).Log("msg", "ignoring block marked for deletion", "block", id) + continue + } + + lastModifiedTime, err := getOldestModifiedTime(ctx, id, bkt) + if err != nil { + level.Warn(logger).Log("msg", "failed to get last modified time for block; falling back to block creation time", "block", id, "err", err) + } + if time.Since(lastModifiedTime) <= PartialUploadThresholdAge { continue } deleteAttempts.Inc() - level.Info(logger).Log("msg", "found partially uploaded block; marking for deletion", "block", id) - // We don't gather any information about deletion marks for partial blocks, so let's simply remove it. We waited - // long PartialUploadThresholdAge already. - // TODO(bwplotka): Fix some edge cases: https://github.com/thanos-io/thanos/issues/2470 . + level.Info(logger).Log("msg", "found partially uploaded block; deleting", "block", id) if err := block.Delete(ctx, logger, bkt, id); err != nil { blockCleanupFailures.Inc() level.Warn(logger).Log("msg", "failed to delete aborted partial upload; will retry in next iteration", "block", id, "thresholdAge", PartialUploadThresholdAge, "err", err) diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index dfd3b98516b..3d7655e5f14 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -6,6 +6,7 @@ package compact import ( "context" "fmt" + "maps" "math" "os" "path/filepath" @@ -147,7 +148,7 @@ func UntilNextDownsampling(m *metadata.Meta) (time.Duration, error) { // SyncMetas synchronizes local state of block metas with what we have in the bucket. func (s *Syncer) SyncMetas(ctx context.Context) error { - var cancel func() = func() {} + var cancel = func() {} if s.syncMetasTimeout > 0 { ctx, cancel = context.WithTimeout(ctx, s.syncMetasTimeout) } @@ -158,7 +159,7 @@ func (s *Syncer) SyncMetas(ctx context.Context) error { partial map[ulid.ULID]error } - container, err := s.g.Do("", func() (interface{}, error) { + container, err := s.g.Do("", func() (any, error) { metas, partial, err := s.fetcher.Fetch(ctx) return metasContainer{metas, partial}, err }) @@ -186,9 +187,7 @@ func (s *Syncer) Metas() map[ulid.ULID]*metadata.Meta { defer s.mtx.Unlock() metas := make(map[ulid.ULID]*metadata.Meta, len(s.blocks)) - for k, v := range s.blocks { - metas[k] = v - } + maps.Copy(metas, s.blocks) return metas } @@ -196,7 +195,9 @@ func (s *Syncer) Metas() map[ulid.ULID]*metadata.Meta { // GarbageCollect marks blocks for deletion from bucket if their data is available as part of a // block with a higher compaction level. // Call to SyncMetas function is required to populate duplicateIDs in duplicateBlocksFilter. 
-func (s *Syncer) GarbageCollect(ctx context.Context) error { +// There is a temporal dependency on deleting marked blocks because otherwise the filters might +// return inconsistent state if syncing of metas is happening in the background. +func (s *Syncer) GarbageCollect(ctx context.Context, justDeletedBlocks map[ulid.ULID]struct{}) error { begin := time.Now() // Ignore filter exists before deduplicate filter. @@ -206,10 +207,16 @@ func (s *Syncer) GarbageCollect(ctx context.Context) error { // GarbageIDs contains the duplicateIDs, since these blocks can be replaced with other blocks. // We also remove ids present in deletionMarkMap since these blocks are already marked for deletion. garbageIDs := []ulid.ULID{} + for _, id := range duplicateIDs { if _, exists := deletionMarkMap[id]; exists { continue } + + if _, exists := justDeletedBlocks[id]; exists { + continue + } + garbageIDs = append(garbageIDs, id) } @@ -488,7 +495,7 @@ func (cg *Group) deleteFromGroup(target map[ulid.ULID]struct{}) { defer cg.mtx.Unlock() var newGroupMeta []*metadata.Meta for _, meta := range cg.metasByMinTime { - if _, found := target[meta.BlockMeta.ULID]; !found { + if _, found := target[meta.ULID]; !found { newGroupMeta = append(newGroupMeta, meta) } } @@ -630,7 +637,7 @@ func (ps *CompactionProgressCalculator) ProgressCalculate(ctx context.Context, g metas := make([]*tsdb.BlockMeta, 0, len(plan)) for _, p := range plan { metas = append(metas, &p.BlockMeta) - toRemove[p.BlockMeta.ULID] = struct{}{} + toRemove[p.ULID] = struct{}{} } g.deleteFromGroup(toRemove) @@ -650,12 +657,12 @@ func (ps *CompactionProgressCalculator) ProgressCalculate(ctx context.Context, g groups = tmpGroups } - ps.CompactProgressMetrics.NumberOfCompactionRuns.Set(0) - ps.CompactProgressMetrics.NumberOfCompactionBlocks.Set(0) + ps.NumberOfCompactionRuns.Set(0) + ps.NumberOfCompactionBlocks.Set(0) for key, iters := range groupCompactions { - ps.CompactProgressMetrics.NumberOfCompactionRuns.Add(float64(iters)) - ps.CompactProgressMetrics.NumberOfCompactionBlocks.Add(float64(groupBlocks[key])) + ps.NumberOfCompactionRuns.Add(float64(iters)) + ps.NumberOfCompactionBlocks.Add(float64(groupBlocks[key])) } return nil @@ -748,9 +755,9 @@ func (ds *DownsampleProgressCalculator) ProgressCalculate(ctx context.Context, g } } - ds.DownsampleProgressMetrics.NumberOfBlocksDownsampled.Set(0) + ds.NumberOfBlocksDownsampled.Set(0) for _, blocks := range groupBlocks { - ds.DownsampleProgressMetrics.NumberOfBlocksDownsampled.Add(float64(blocks)) + ds.NumberOfBlocksDownsampled.Add(float64(blocks)) } return nil @@ -797,9 +804,9 @@ func (rs *RetentionProgressCalculator) ProgressCalculate(ctx context.Context, gr } } - rs.RetentionProgressMetrics.NumberOfBlocksToDelete.Set(0) + rs.NumberOfBlocksToDelete.Set(0) for _, blocks := range groupBlocks { - rs.RetentionProgressMetrics.NumberOfBlocksToDelete.Add(float64(blocks)) + rs.NumberOfBlocksToDelete.Add(float64(blocks)) } return nil @@ -1379,6 +1386,7 @@ type BucketCompactor struct { bkt objstore.Bucket concurrency int skipBlocksWithOutOfOrderChunks bool + blocksCleaner *BlocksCleaner } // NewBucketCompactor creates a new bucket compactor. 
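// ---------------------------------------------------------------------------
// Editor's aside (illustrative sketch, not part of the vendored patch): the
// dedup filter in fetcher.go above and the compactor worker pool in the next
// hunk replace the manual wg.Add(1) / go func() { defer wg.Done(); ... }()
// pattern with sync.WaitGroup.Go, added to the standard library in Go 1.25.
// A minimal sketch of the equivalence:
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	squares := make([]int, 4)
	for i := range squares {
		// Go increments the counter, runs f in a new goroutine, and calls
		// Done when f returns, removing the Add/Done bookkeeping.
		wg.Go(func() {
			squares[i] = i * i // i is per-iteration since Go 1.22
		})
	}
	wg.Wait()
	fmt.Println(squares) // [0 1 4 9]
}
// ---------------------------------------------------------------------------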
@@ -1392,6 +1400,7 @@ func NewBucketCompactor( bkt objstore.Bucket, concurrency int, skipBlocksWithOutOfOrderChunks bool, + blocksCleaner *BlocksCleaner, ) (*BucketCompactor, error) { if concurrency <= 0 { return nil, errors.Errorf("invalid concurrency level (%d), concurrency level must be > 0", concurrency) @@ -1408,6 +1417,7 @@ func NewBucketCompactor( bkt, concurrency, skipBlocksWithOutOfOrderChunks, + blocksCleaner, ) } @@ -1423,6 +1433,7 @@ func NewBucketCompactorWithCheckerAndCallback( bkt objstore.Bucket, concurrency int, skipBlocksWithOutOfOrderChunks bool, + blocksCleaner *BlocksCleaner, ) (*BucketCompactor, error) { if concurrency <= 0 { return nil, errors.Errorf("invalid concurrency level (%d), concurrency level must be > 0", concurrency) @@ -1439,6 +1450,7 @@ func NewBucketCompactorWithCheckerAndCallback( bkt: bkt, concurrency: concurrency, skipBlocksWithOutOfOrderChunks: skipBlocksWithOutOfOrderChunks, + blocksCleaner: blocksCleaner, }, nil } @@ -1471,9 +1483,7 @@ func (c *BucketCompactor) Compact(ctx context.Context) (rerr error) { // Set up workers who will compact the groups when the groups are ready. // They will compact available groups until they encounter an error, after which they will stop. for i := 0; i < c.concurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { for g := range groupChan { shouldRerunGroup, _, err := g.Compact(workCtx, c.compactDir, c.planner, c.comp, c.blockDeletableChecker, c.compactionLifecycleCallback) if err == nil { @@ -1513,18 +1523,27 @@ func (c *BucketCompactor) Compact(ctx context.Context) (rerr error) { errChan <- errors.Wrapf(err, "group %s", g.Key()) return } - }() + }) } - level.Info(c.logger).Log("msg", "start sync of metas") + // Blocks that were compacted are garbage collected after each Compaction. + // However if compactor crashes we need to resolve those on startup. + level.Info(c.logger).Log("msg", "start initial sync of metas") if err := c.sy.SyncMetas(ctx); err != nil { return errors.Wrap(err, "sync") } - level.Info(c.logger).Log("msg", "start of GC") - // Blocks that were compacted are garbage collected after each Compaction. - // However if compactor crashes we need to resolve those on startup. 
- if err := c.sy.GarbageCollect(ctx); err != nil { + var ignoreBlocks map[ulid.ULID]struct{} + if c.blocksCleaner != nil { + deletedBlocks, err := c.blocksCleaner.DeleteMarkedBlocks(ctx) + if err != nil { + return errors.Wrap(err, "cleaning marked blocks") + } + ignoreBlocks = deletedBlocks + } + + level.Info(c.logger).Log("msg", "start of initial garbage collection") + if err := c.sy.GarbageCollect(ctx, ignoreBlocks); err != nil { return errors.Wrap(err, "garbage") } @@ -1610,9 +1629,7 @@ func NewGatherNoCompactionMarkFilter(logger log.Logger, bkt objstore.Instrumente func (f *GatherNoCompactionMarkFilter) NoCompactMarkedBlocks() map[ulid.ULID]*metadata.NoCompactMark { f.mtx.Lock() copiedNoCompactMarked := make(map[ulid.ULID]*metadata.NoCompactMark, len(f.noCompactMarkedMap)) - for k, v := range f.noCompactMarkedMap { - copiedNoCompactMarked[k] = v - } + maps.Copy(copiedNoCompactMarked, f.noCompactMarkedMap) f.mtx.Unlock() return copiedNoCompactMarked diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go index dff0ca2f511..f6a6f9472ee 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go @@ -6,6 +6,7 @@ package downsample import ( "context" "fmt" + "maps" "math" "math/rand" "os" @@ -171,7 +172,7 @@ func Downsample( // Raw and already downsampled data need different processing. if origMeta.Thanos.Downsample.Resolution == 0 { - var prevEnc chunkenc.Encoding = chks[0].Chunk.Encoding() + var prevEnc = chks[0].Chunk.Encoding() for _, c := range chks { if cutNewChunk(c.Chunk.Encoding(), prevEnc) { @@ -662,10 +663,7 @@ func downsampleRawLoop( batchSize := (len(data) / numChunks) + 1 for len(data) > 0 { - j := batchSize - if j > len(data) { - j = len(data) - } + j := min(batchSize, len(data)) curW := currentWindow(data[j-1].t, resolution) // The batch we took might end in the middle of a downsampling window. We additionally grab @@ -832,14 +830,12 @@ func downsampleBatch(data []sample, resolution int64, aggr sampleAggregator, add add(nextT, aggr) } aggr.reset() - nextT = currentWindow(s.t, resolution) - // Limit next timestamp to not go beyond the batch. A subsequent batch - // may overlap in time range otherwise. - // We have aligned batches for raw downsamplings but subsequent downsamples - // are forced to be chunk-boundary aligned and cannot guarantee this. - if nextT > lastT { - nextT = lastT - } + nextT = min( + // Limit next timestamp to not go beyond the batch. A subsequent batch + // may overlap in time range otherwise. + // We have aligned batches for raw downsamplings but subsequent downsamples + // are forced to be chunk-boundary aligned and cannot guarantee this. 
+ currentWindow(s.t, resolution), lastT) } aggr.add(s) } @@ -921,10 +917,7 @@ func downsampleAggrLoop( batchSize := len(chks) / numChunks for len(chks) > 0 { - j := batchSize - if j > len(chks) { - j = len(chks) - } + j := min(batchSize, len(chks)) part := chks[:j] chks = chks[j:] @@ -1352,9 +1345,7 @@ func NewGatherNoDownsampleMarkFilter(logger log.Logger, bkt objstore.Instrumente func (f *GatherNoDownsampleMarkFilter) NoDownsampleMarkedBlocks() map[ulid.ULID]*metadata.NoDownsampleMark { f.mtx.Lock() copiedNoDownsampleMarked := make(map[ulid.ULID]*metadata.NoDownsampleMark, len(f.noDownsampleMarkedMap)) - for k, v := range f.noDownsampleMarkedMap { - copiedNoDownsampleMarked[k] = v - } + maps.Copy(copiedNoDownsampleMarked, f.noDownsampleMarkedMap) f.mtx.Unlock() return copiedNoDownsampleMarked diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/pool.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/pool.go index 68dec1227e3..85f9f8e0ff0 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/pool.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/pool.go @@ -21,7 +21,7 @@ func NewPool() chunkenc.Pool { return &pool{ wrapped: chunkenc.NewPool(), aggr: sync.Pool{ - New: func() interface{} { + New: func() any { return &AggrChunk{} }, }, diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go index 3972d608430..82675a762d9 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go @@ -6,6 +6,7 @@ package compact import ( "context" "fmt" + "maps" "math" "path/filepath" @@ -307,12 +308,8 @@ func WithLargeTotalIndexSizeFilter(with *tsdbBasedPlanner, bkt objstore.Bucket, func (t *largeTotalIndexSizeFilter) plan(ctx context.Context, extraNoCompactMarked map[ulid.ULID]*metadata.NoCompactMark, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) { noCompactMarked := t.noCompBlocksFunc() copiedNoCompactMarked := make(map[ulid.ULID]*metadata.NoCompactMark, len(noCompactMarked)+len(extraNoCompactMarked)) - for k, v := range noCompactMarked { - copiedNoCompactMarked[k] = v - } - for k, v := range extraNoCompactMarked { - copiedNoCompactMarked[k] = v - } + maps.Copy(copiedNoCompactMarked, noCompactMarked) + maps.Copy(copiedNoCompactMarked, extraNoCompactMarked) PlanLoop: for { diff --git a/vendor/github.com/thanos-io/thanos/pkg/dedup/chunk_iter.go b/vendor/github.com/thanos-io/thanos/pkg/dedup/chunk_iter.go index 1401767f549..111dedd379e 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/dedup/chunk_iter.go +++ b/vendor/github.com/thanos-io/thanos/pkg/dedup/chunk_iter.go @@ -138,11 +138,11 @@ func (h chunkIteratorHeap) Less(i, j int) bool { return at.MinTime < bt.MinTime } -func (h *chunkIteratorHeap) Push(x interface{}) { +func (h *chunkIteratorHeap) Push(x any) { *h = append(*h, x.(chunks.Iterator)) } -func (h *chunkIteratorHeap) Pop() interface{} { +func (h *chunkIteratorHeap) Pop() any { old := *h n := len(old) x := old[n-1] diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go index 7971e7991cb..6ae3ef2435f 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go @@ -9,6 +9,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/go-kit/log/level" grpcresolver "google.golang.org/grpc/resolver" ) @@ -44,6 +45,13 @@ 
func (b *builder) Build(t grpcresolver.Target, cc grpcresolver.ClientConn, _ grp interval: b.resolveInterval, logger: b.logger, } + + // perform initial, synchronous resolution to populate the state. + level.Info(r.logger).Log("msg", "performing initial gRPC endpoint resolution", "target", r.target) + if err := r.updateResolver(); err != nil { + level.Error(r.logger).Log("msg", "initial gRPC endpoint resolution failed", "target", r.target, "err", err) + } + r.wg.Add(1) go r.run() @@ -80,32 +88,36 @@ func (r *resolver) addresses() []string { return r.provider.AddressesForHost(r.target) } +func (r *resolver) updateResolver() error { + if err := r.resolve(); err != nil { + r.cc.ReportError(err) + return err + } + state := grpcresolver.State{} + addrs := r.addresses() + if len(addrs) == 0 { + level.Info(r.logger).Log("msg", "no addresses resolved", "target", r.target) + return nil + } + for _, addr := range addrs { + state.Addresses = append(state.Addresses, grpcresolver.Address{Addr: addr}) + } + if err := r.cc.UpdateState(state); err != nil { + return err + } + return nil +} + func (r *resolver) run() { defer r.wg.Done() for { - func() { - if err := r.resolve(); err != nil { - r.cc.ReportError(err) - r.logger.Log("msg", "failed to resolve", "err", err) - return - } - state := grpcresolver.State{} - addrs := r.addresses() - if len(addrs) == 0 { - r.logger.Log("msg", "no addresses resolved", "target", r.target) - return - } - for _, addr := range addrs { - state.Addresses = append(state.Addresses, grpcresolver.Address{Addr: addr}) - } - if err := r.cc.UpdateState(state); err != nil { - r.logger.Log("msg", "failed to update state", "err", err) - } - }() select { case <-r.ctx.Done(): return case <-time.After(r.interval): + if err := r.updateResolver(); err != nil { + level.Error(r.logger).Log("msg", "failed to update state for gRPC resolver", "err", err) + } } } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go index f9c1c7f583b..478d8990eb3 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go @@ -5,6 +5,7 @@ package dns import ( "context" + "maps" "net" "strings" "sync" @@ -143,9 +144,7 @@ func (p *Provider) Resolve(ctx context.Context, addrs []string, flushOld bool) e if flushOld && len(errs) == 0 { p.resolved = map[string][]string{} } - for name, addrs := range resolvedAddrs { - p.resolved[name] = addrs - } + maps.Copy(p.resolved, resolvedAddrs) for name, addrs := range p.resolved { p.resolverAddrs.WithLabelValues(name).Set(float64(len(addrs))) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/memcache/provider.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/memcache/provider.go index 1560c559519..30e8c346011 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/memcache/provider.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/memcache/provider.go @@ -6,6 +6,7 @@ package memcache import ( "context" "fmt" + "maps" "sync" "time" @@ -94,9 +95,7 @@ func (p *Provider) Resolve(ctx context.Context, addresses []string, flushOld boo if flushOld && len(errs) == 0 { p.clusterConfigs = map[string]*clusterConfig{} } - for addr, config := range clusterConfigs { - p.clusterConfigs[addr] = config - } + maps.Copy(p.clusterConfigs, clusterConfigs) return errs.Err() } diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/memcache/resolver.go 
b/vendor/github.com/thanos-io/thanos/pkg/discovery/memcache/resolver.go index 4e7406cfba7..10bb7c37adc 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/memcache/resolver.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/memcache/resolver.go @@ -92,10 +92,10 @@ func (s *memcachedAutoDiscovery) parseConfig(reader *bufio.Reader) (*clusterConf } if len(configVersion)+len(nodes) != configSize { - return nil, fmt.Errorf("expected %d in config payload, but got %d instead.", configSize, len(configVersion)+len(nodes)) + return nil, fmt.Errorf("expected %d in config payload, but got %d instead", configSize, len(configVersion)+len(nodes)) } - for _, host := range strings.Split(strings.TrimSpace(nodes), " ") { + for host := range strings.SplitSeq(strings.TrimSpace(nodes), " ") { dnsIpPort := strings.Split(host, "|") if len(dnsIpPort) != 3 { return nil, fmt.Errorf("node not in expected format: %s", dnsIpPort) diff --git a/vendor/github.com/thanos-io/thanos/pkg/extgrpc/snappy/snappy.go b/vendor/github.com/thanos-io/thanos/pkg/extgrpc/snappy/snappy.go index 45f7cfc3699..c805c68fb3a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/extgrpc/snappy/snappy.go +++ b/vendor/github.com/thanos-io/thanos/pkg/extgrpc/snappy/snappy.go @@ -28,12 +28,12 @@ type compressor struct { func newCompressor() *compressor { c := &compressor{} c.readersPool = sync.Pool{ - New: func() interface{} { + New: func() any { return snappy.NewReader(nil) }, } c.writersPool = sync.Pool{ - New: func() interface{} { + New: func() any { return snappy.NewBufferedWriter(nil) }, } diff --git a/vendor/github.com/thanos-io/thanos/pkg/extpromql/parser.go b/vendor/github.com/thanos-io/thanos/pkg/extpromql/parser.go index 8b3cc46252b..4f3fd950bc4 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/extpromql/parser.go +++ b/vendor/github.com/thanos-io/thanos/pkg/extpromql/parser.go @@ -5,6 +5,7 @@ package extpromql import ( "fmt" + "maps" "strings" "github.com/pkg/errors" @@ -17,12 +18,8 @@ import ( // ParseExpr parses the input PromQL expression and returns the parsed representation. func ParseExpr(input string) (parser.Expr, error) { allFuncs := make(map[string]*parser.Function, len(parse.XFunctions)+len(parser.Functions)) - for k, v := range parser.Functions { - allFuncs[k] = v - } - for k, v := range parse.XFunctions { - allFuncs[k] = v - } + maps.Copy(allFuncs, parser.Functions) + maps.Copy(allFuncs, parse.XFunctions) p := parser.NewParser(input, parser.WithFunctions(allFuncs)) defer p.Close() return p.ParseExpr() diff --git a/vendor/github.com/thanos-io/thanos/pkg/info/infopb/rpc.pb.go b/vendor/github.com/thanos-io/thanos/pkg/info/infopb/rpc.pb.go index c88be63cfc1..e22b7a50087 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/info/infopb/rpc.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/info/infopb/rpc.pb.go @@ -80,6 +80,8 @@ type InfoResponse struct { Exemplars *ExemplarsInfo `protobuf:"bytes,7,opt,name=exemplars,proto3" json:"exemplars,omitempty"` // QueryAPIInfo holds the metadata related to Query API if exposed by the component, otherwise it will be null. Query *QueryAPIInfo `protobuf:"bytes,8,opt,name=query,proto3" json:"query,omitempty"` + // StatusInfo holds the metadata related to Status API if exposed by the component, otherwise it will be null. 
+ Status *StatusInfo `protobuf:"bytes,9,opt,name=status,proto3" json:"status,omitempty"` } func (m *InfoResponse) Reset() { *m = InfoResponse{} } @@ -346,6 +348,43 @@ func (m *QueryAPIInfo) XXX_DiscardUnknown() { var xxx_messageInfo_QueryAPIInfo proto.InternalMessageInfo +// StatusInfo holds the metadata related to Status API exposed by the component. +type StatusInfo struct { +} + +func (m *StatusInfo) Reset() { *m = StatusInfo{} } +func (m *StatusInfo) String() string { return proto.CompactTextString(m) } +func (*StatusInfo) ProtoMessage() {} +func (*StatusInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_a1214ec45d2bf952, []int{8} +} +func (m *StatusInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusInfo.Merge(m, src) +} +func (m *StatusInfo) XXX_Size() int { + return m.Size() +} +func (m *StatusInfo) XXX_DiscardUnknown() { + xxx_messageInfo_StatusInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusInfo proto.InternalMessageInfo + type TSDBInfo struct { Labels labelpb.ZLabelSet `protobuf:"bytes,1,opt,name=labels,proto3" json:"labels"` MinTime int64 `protobuf:"varint,2,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` @@ -356,7 +395,7 @@ func (m *TSDBInfo) Reset() { *m = TSDBInfo{} } func (m *TSDBInfo) String() string { return proto.CompactTextString(m) } func (*TSDBInfo) ProtoMessage() {} func (*TSDBInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_a1214ec45d2bf952, []int{8} + return fileDescriptor_a1214ec45d2bf952, []int{9} } func (m *TSDBInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -394,50 +433,53 @@ func init() { proto.RegisterType((*TargetsInfo)(nil), "thanos.info.TargetsInfo") proto.RegisterType((*ExemplarsInfo)(nil), "thanos.info.ExemplarsInfo") proto.RegisterType((*QueryAPIInfo)(nil), "thanos.info.QueryAPIInfo") + proto.RegisterType((*StatusInfo)(nil), "thanos.info.StatusInfo") proto.RegisterType((*TSDBInfo)(nil), "thanos.info.TSDBInfo") } func init() { proto.RegisterFile("info/infopb/rpc.proto", fileDescriptor_a1214ec45d2bf952) } var fileDescriptor_a1214ec45d2bf952 = []byte{ - // 589 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x8a, 0xda, 0x40, - 0x14, 0xc6, 0x8d, 0x7f, 0xe3, 0x71, 0xdd, 0xee, 0x0e, 0xbb, 0x25, 0x4a, 0x89, 0x12, 0xf6, 0x42, - 0x68, 0x31, 0x60, 0xa1, 0x94, 0xf6, 0xaa, 0x6e, 0x85, 0x6e, 0xe9, 0x42, 0x1b, 0x85, 0xc2, 0xde, - 0x84, 0xa8, 0xb3, 0x1a, 0x48, 0x32, 0x63, 0x66, 0xa4, 0xfa, 0x16, 0x7d, 0x95, 0xbe, 0x85, 0x97, - 0x7b, 0xd9, 0xab, 0xd2, 0xea, 0x43, 0xf4, 0xb6, 0xcc, 0x4c, 0x62, 0x0d, 0xdd, 0xbd, 0xe9, 0x8d, - 0x66, 0xe6, 0xfb, 0x9d, 0xc9, 0x39, 0xdf, 0x39, 0x13, 0x38, 0xf7, 0xa3, 0x5b, 0x62, 0x8b, 0x1f, - 0x3a, 0xb6, 0x63, 0x3a, 0xe9, 0xd2, 0x98, 0x70, 0x82, 0x6a, 0x7c, 0xee, 0x45, 0x84, 0x75, 0x85, - 0xd0, 0x6c, 0x30, 0x4e, 0x62, 0x6c, 0x07, 0xde, 0x18, 0x07, 0x74, 0x6c, 0xf3, 0x35, 0xc5, 0x4c, - 0x71, 0xcd, 0xb3, 0x19, 0x99, 0x11, 0xf9, 0x68, 0x8b, 0x27, 0xb5, 0x6b, 0xd5, 0xa1, 0x76, 0x15, - 0xdd, 0x12, 0x07, 0x2f, 0x96, 0x98, 0x71, 0xeb, 0x5b, 0x01, 0x8e, 0xd4, 0x9a, 0x51, 0x12, 0x31, - 0x8c, 0x5e, 0x00, 0xc8, 0xc3, 0x5c, 0x86, 0x39, 
0x33, 0xb4, 0x76, 0xa1, 0x53, 0xeb, 0x9d, 0x76, - 0x93, 0x57, 0xde, 0x7c, 0x10, 0xd2, 0x10, 0xf3, 0x7e, 0x71, 0xf3, 0xa3, 0x95, 0x73, 0xaa, 0x41, - 0xb2, 0x66, 0xe8, 0x02, 0xea, 0x97, 0x24, 0xa4, 0x24, 0xc2, 0x11, 0x1f, 0xad, 0x29, 0x36, 0xf2, - 0x6d, 0xad, 0x53, 0x75, 0xb2, 0x9b, 0xe8, 0x19, 0x94, 0x64, 0xc2, 0x46, 0xa1, 0xad, 0x75, 0x6a, - 0xbd, 0xc7, 0xdd, 0x83, 0x5a, 0xba, 0x43, 0xa1, 0xc8, 0x64, 0x14, 0x24, 0xe8, 0x78, 0x19, 0x60, - 0x66, 0x14, 0xef, 0xa1, 0x1d, 0xa1, 0x28, 0x5a, 0x42, 0xe8, 0x1d, 0x3c, 0x0a, 0x31, 0x8f, 0xfd, - 0x89, 0x1b, 0x62, 0xee, 0x4d, 0x3d, 0xee, 0x19, 0x25, 0x19, 0xd7, 0xca, 0xc4, 0x5d, 0x4b, 0xe6, - 0x3a, 0x41, 0xe4, 0x01, 0xc7, 0x61, 0x66, 0x0f, 0xf5, 0xa0, 0xc2, 0xbd, 0x78, 0x26, 0x0c, 0x28, - 0xcb, 0x13, 0x8c, 0xcc, 0x09, 0x23, 0xa5, 0xc9, 0xd0, 0x14, 0x44, 0x2f, 0xa1, 0x8a, 0x57, 0x38, - 0xa4, 0x81, 0x17, 0x33, 0xa3, 0x22, 0xa3, 0x9a, 0x99, 0xa8, 0x41, 0xaa, 0xca, 0xb8, 0xbf, 0x30, - 0xb2, 0xa1, 0xb4, 0x58, 0xe2, 0x78, 0x6d, 0xe8, 0x32, 0xaa, 0x91, 0x89, 0xfa, 0x24, 0x94, 0x37, - 0x1f, 0xaf, 0x54, 0xa1, 0x92, 0xb3, 0x7e, 0x6b, 0x50, 0xdd, 0x7b, 0x85, 0x1a, 0xa0, 0x87, 0x7e, - 0xe4, 0x72, 0x3f, 0xc4, 0x86, 0xd6, 0xd6, 0x3a, 0x05, 0xa7, 0x12, 0xfa, 0xd1, 0xc8, 0x0f, 0xb1, - 0x94, 0xbc, 0x95, 0x92, 0xf2, 0x89, 0xe4, 0xad, 0xa4, 0xf4, 0x14, 0x4e, 0xd9, 0x92, 0x52, 0x12, - 0x73, 0xe6, 0xb2, 0xb9, 0x17, 0x4f, 0xfd, 0x68, 0x26, 0x9b, 0xa2, 0x3b, 0x27, 0xa9, 0x30, 0x4c, - 0xf6, 0xd1, 0x00, 0x5a, 0x7b, 0xf8, 0x8b, 0xcf, 0xe7, 0x64, 0xc9, 0xdd, 0x18, 0xd3, 0xc0, 0x9f, - 0x78, 0xae, 0x9c, 0x00, 0x26, 0x9d, 0xd6, 0x9d, 0x27, 0x29, 0xf6, 0x59, 0x51, 0x8e, 0x82, 0xe4, - 0xd4, 0x30, 0xf4, 0x0a, 0x80, 0xb3, 0xe9, 0xd8, 0x15, 0x85, 0x09, 0x67, 0xc5, 0x68, 0x9d, 0x67, - 0x9d, 0x1d, 0xbe, 0xed, 0x8b, 0xa2, 0xd2, 0xf1, 0x12, 0xb8, 0x58, 0xb3, 0xf7, 0x45, 0xbd, 0x78, - 0x52, 0xb2, 0x6a, 0x50, 0xdd, 0xb7, 0xdd, 0x3a, 0x03, 0xf4, 0x6f, 0x2f, 0xc5, 0x7c, 0x1f, 0xf4, - 0xc7, 0x1a, 0x40, 0x3d, 0x63, 0xfc, 0xff, 0xd9, 0x65, 0x1d, 0xc3, 0xd1, 0x61, 0x27, 0xac, 0x05, - 0xe8, 0x69, 0xae, 0xc8, 0x86, 0x72, 0x62, 0x82, 0x26, 0x1b, 0xf8, 0xe0, 0x6d, 0x49, 0xb0, 0x4c, - 0x0a, 0xf9, 0x87, 0x53, 0x28, 0x64, 0x52, 0xe8, 0x5d, 0x42, 0x51, 0xbe, 0xee, 0x75, 0xf2, 0x9f, - 0x9d, 0xc9, 0x83, 0x3b, 0xdd, 0x6c, 0xdc, 0xa3, 0xa8, 0xdb, 0xdd, 0xbf, 0xd8, 0xfc, 0x32, 0x73, - 0x9b, 0xad, 0xa9, 0xdd, 0x6d, 0x4d, 0xed, 0xe7, 0xd6, 0xd4, 0xbe, 0xee, 0xcc, 0xdc, 0xdd, 0xce, - 0xcc, 0x7d, 0xdf, 0x99, 0xb9, 0x9b, 0xb2, 0xfa, 0xd6, 0x8c, 0xcb, 0xf2, 0x53, 0xf1, 0xfc, 0x4f, - 0x00, 0x00, 0x00, 0xff, 0xff, 0xf1, 0x5f, 0x0a, 0x2f, 0x81, 0x04, 0x00, 0x00, + // 610 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x4f, 0xdb, 0x30, + 0x18, 0xc6, 0x1b, 0xfa, 0x87, 0xe6, 0x2d, 0x30, 0xb0, 0x60, 0x4b, 0xab, 0x29, 0x54, 0x11, 0x87, + 0x4a, 0x9b, 0x1a, 0xa9, 0x93, 0xa6, 0x69, 0x3b, 0x0d, 0x86, 0x34, 0xa6, 0x21, 0x6d, 0x29, 0xd2, + 0x24, 0x2e, 0x91, 0x0b, 0xa6, 0x44, 0x4a, 0x62, 0x13, 0x3b, 0x1a, 0x7c, 0x8b, 0x7d, 0x2c, 0x8e, + 0x1c, 0x77, 0x9a, 0xb6, 0xf2, 0x21, 0x76, 0x9d, 0xfc, 0x3a, 0x29, 0xcd, 0x06, 0x97, 0x5d, 0xda, + 0xd8, 0xcf, 0xef, 0x71, 0x5e, 0xfb, 0x79, 0x63, 0xd8, 0x8a, 0xd2, 0x33, 0xee, 0xeb, 0x1f, 0x31, + 0xf1, 0x33, 0x71, 0x32, 0x14, 0x19, 0x57, 0x9c, 0x74, 0xd4, 0x39, 0x4d, 0xb9, 0x1c, 0x6a, 0xa1, + 0xd7, 0x95, 0x8a, 0x67, 0xcc, 0x8f, 0xe9, 0x84, 0xc5, 0x62, 0xe2, 0xab, 0x2b, 0xc1, 0xa4, 0xe1, + 0x7a, 0x9b, 0x53, 0x3e, 0xe5, 0xf8, 0xe8, 0xeb, 0x27, 0x33, 0xeb, 0xad, 0x42, 0xe7, 0x20, 0x3d, + 0xe3, 0x01, 0xbb, 0xc8, 0x99, 0x54, 0xde, 
0xac, 0x0e, 0x2b, 0x66, 0x2c, 0x05, 0x4f, 0x25, 0x23, + 0x2f, 0x01, 0x70, 0xb1, 0x50, 0x32, 0x25, 0x1d, 0xab, 0x5f, 0x1f, 0x74, 0x46, 0x1b, 0xc3, 0xe2, + 0x95, 0xc7, 0x1f, 0xb5, 0x34, 0x66, 0x6a, 0xb7, 0x71, 0xfd, 0x63, 0xbb, 0x16, 0xd8, 0x71, 0x31, + 0x96, 0x64, 0x07, 0x56, 0xf7, 0x78, 0x22, 0x78, 0xca, 0x52, 0x75, 0x74, 0x25, 0x98, 0xb3, 0xd4, + 0xb7, 0x06, 0x76, 0x50, 0x9d, 0x24, 0xcf, 0xa1, 0x89, 0x05, 0x3b, 0xf5, 0xbe, 0x35, 0xe8, 0x8c, + 0x1e, 0x0f, 0x17, 0xf6, 0x32, 0x1c, 0x6b, 0x05, 0x8b, 0x31, 0x90, 0xa6, 0xb3, 0x3c, 0x66, 0xd2, + 0x69, 0xdc, 0x43, 0x07, 0x5a, 0x31, 0x34, 0x42, 0xe4, 0x3d, 0x3c, 0x4a, 0x98, 0xca, 0xa2, 0x93, + 0x30, 0x61, 0x8a, 0x9e, 0x52, 0x45, 0x9d, 0x26, 0xfa, 0xb6, 0x2b, 0xbe, 0x43, 0x64, 0x0e, 0x0b, + 0x04, 0x17, 0x58, 0x4b, 0x2a, 0x73, 0x64, 0x04, 0xcb, 0x8a, 0x66, 0x53, 0x7d, 0x00, 0x2d, 0x5c, + 0xc1, 0xa9, 0xac, 0x70, 0x64, 0x34, 0xb4, 0x96, 0x20, 0x79, 0x05, 0x36, 0xbb, 0x64, 0x89, 0x88, + 0x69, 0x26, 0x9d, 0x65, 0x74, 0xf5, 0x2a, 0xae, 0xfd, 0x52, 0x45, 0xdf, 0x1d, 0x4c, 0x7c, 0x68, + 0x5e, 0xe4, 0x2c, 0xbb, 0x72, 0xda, 0xe8, 0xea, 0x56, 0x5c, 0x9f, 0xb5, 0xf2, 0xf6, 0xd3, 0x81, + 0xd9, 0x28, 0x72, 0xc4, 0x87, 0x96, 0x54, 0x54, 0xe5, 0xd2, 0xb1, 0xd1, 0xf1, 0xe4, 0xaf, 0x53, + 0xd4, 0x12, 0xf2, 0x05, 0xe6, 0xfd, 0xb6, 0xc0, 0x9e, 0x1f, 0x2e, 0xe9, 0x42, 0x3b, 0x89, 0xd2, + 0x50, 0x45, 0x09, 0x73, 0xac, 0xbe, 0x35, 0xa8, 0x07, 0xcb, 0x49, 0x94, 0x1e, 0x45, 0x09, 0x43, + 0x89, 0x5e, 0x1a, 0x69, 0xa9, 0x90, 0xe8, 0x25, 0x4a, 0xcf, 0x60, 0x43, 0xe6, 0x42, 0xf0, 0x4c, + 0xc9, 0x50, 0x9e, 0xd3, 0xec, 0x34, 0x4a, 0xa7, 0x98, 0x62, 0x3b, 0x58, 0x2f, 0x85, 0x71, 0x31, + 0x4f, 0xf6, 0x61, 0x7b, 0x0e, 0x7f, 0x8d, 0xd4, 0x39, 0xcf, 0x55, 0x98, 0x31, 0x11, 0x47, 0x27, + 0x34, 0xc4, 0x96, 0x91, 0x18, 0x4d, 0x3b, 0x78, 0x5a, 0x62, 0x5f, 0x0c, 0x15, 0x18, 0x08, 0xdb, + 0x4c, 0x92, 0xd7, 0x00, 0x4a, 0x9e, 0x4e, 0x42, 0xbd, 0x2f, 0x1d, 0x85, 0xee, 0xc5, 0xad, 0x6a, + 0x14, 0xe3, 0x77, 0xbb, 0x7a, 0x53, 0x65, 0x3f, 0x6a, 0x5c, 0x8f, 0xe5, 0x87, 0x46, 0xbb, 0xb1, + 0xde, 0xf4, 0x3a, 0x60, 0xcf, 0xfb, 0xc4, 0xdb, 0x04, 0xf2, 0x6f, 0xf8, 0xfa, 0x83, 0x58, 0x08, + 0xd4, 0xdb, 0x87, 0xd5, 0x4a, 0x52, 0xff, 0x77, 0x5c, 0xde, 0x1a, 0xac, 0x2c, 0x46, 0xe7, 0xad, + 0x00, 0xdc, 0x05, 0xe3, 0x5d, 0x40, 0xbb, 0xac, 0x5c, 0xa7, 0x59, 0x1c, 0x89, 0x85, 0x69, 0x3e, + 0xf8, 0xb1, 0x15, 0x58, 0xa5, 0xa0, 0xa5, 0x87, 0x0b, 0xaa, 0x57, 0x0a, 0x1a, 0xed, 0x41, 0x03, + 0x5f, 0xf7, 0xa6, 0xf8, 0xaf, 0xb6, 0xf4, 0xc2, 0x95, 0xd0, 0xeb, 0xde, 0xa3, 0x98, 0xcb, 0x61, + 0x77, 0xe7, 0xfa, 0x97, 0x5b, 0xbb, 0x9e, 0xb9, 0xd6, 0xcd, 0xcc, 0xb5, 0x7e, 0xce, 0x5c, 0xeb, + 0xdb, 0xad, 0x5b, 0xbb, 0xb9, 0x75, 0x6b, 0xdf, 0x6f, 0xdd, 0xda, 0x71, 0xcb, 0x5c, 0x55, 0x93, + 0x16, 0xde, 0x34, 0x2f, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xe4, 0xc4, 0xbd, 0xc0, 0x04, + 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
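The hunk below threads the new Status field through the hand-written marshaller; the literal byte 0x4a it emits is the protobuf tag for that field. A tag byte is (field_number << 3) | wire_type, and rpc.proto declares StatusInfo status = 9 with wire type 2 (length-delimited), hence (9 << 3) | 2 = 0x4a. A minimal, self-contained check of that arithmetic (not part of the patch):

package main

import "fmt"

func main() {
	const fieldNumber = 9          // InfoResponse.status, per rpc.proto below
	const wireTypeLenDelimited = 2 // embedded messages are length-delimited
	fmt.Printf("0x%x\n", fieldNumber<<3|wireTypeLenDelimited) // prints 0x4a
}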
@@ -565,6 +607,18 @@ func (m *InfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } if m.Query != nil { { size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) @@ -853,6 +907,29 @@ func (m *QueryAPIInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StatusInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + func (m *TSDBInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -956,6 +1033,10 @@ func (m *InfoResponse) Size() (n int) { l = m.Query.Size() n += 1 + l + sovRpc(uint64(l)) } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovRpc(uint64(l)) + } return n } @@ -1037,6 +1118,15 @@ func (m *QueryAPIInfo) Size() (n int) { return n } +func (m *StatusInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + func (m *TSDBInfo) Size() (n int) { if m == nil { return 0 @@ -1421,6 +1511,42 @@ func (m *InfoResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &StatusInfo{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -1892,6 +2018,56 @@ func (m *QueryAPIInfo) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatusInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m 
*TSDBInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/thanos-io/thanos/pkg/info/infopb/rpc.proto b/vendor/github.com/thanos-io/thanos/pkg/info/infopb/rpc.proto index 9f0db3709da..9e8dbee1db8 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/info/infopb/rpc.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/info/infopb/rpc.proto @@ -49,6 +49,9 @@ message InfoResponse { // QueryAPIInfo holds the metadata related to Query API if exposed by the component, otherwise it will be null. QueryAPIInfo query = 8; + + // StatusInfo holds the metadata related to Status API if exposed by the component, otherwise it will be null. + StatusInfo status = 9; } // StoreInfo holds the metadata related to Store API exposed by the component. @@ -88,6 +91,10 @@ message ExemplarsInfo { message QueryAPIInfo { } +// StatusInfo holds the metadata related to Status API exposed by the component. +message StatusInfo { +} + message TSDBInfo { ZLabelSet labels = 1 [(gogoproto.nullable) = false]; diff --git a/vendor/github.com/thanos-io/thanos/pkg/model/units.go b/vendor/github.com/thanos-io/thanos/pkg/model/units.go index cd4944f6d7e..19dbb5b58a4 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/model/units.go +++ b/vendor/github.com/thanos-io/thanos/pkg/model/units.go @@ -11,7 +11,7 @@ import ( // with units. type Bytes uint64 -func (b *Bytes) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (b *Bytes) UnmarshalYAML(unmarshal func(any) error) error { var value string err := unmarshal(&value) if err != nil { @@ -27,6 +27,6 @@ func (b *Bytes) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } -func (b *Bytes) MarshalYAML() (interface{}, error) { +func (b *Bytes) MarshalYAML() (any, error) { return units.Base2Bytes(*b).String(), nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go index b655ea1ab15..b07e44722b3 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go +++ b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go @@ -39,6 +39,7 @@ import ( "github.com/thanos-io/thanos/pkg/metadata/metadatapb" "github.com/thanos-io/thanos/pkg/rules/rulespb" "github.com/thanos-io/thanos/pkg/runutil" + "github.com/thanos-io/thanos/pkg/status/statuspb" "github.com/thanos-io/thanos/pkg/store/storepb" "github.com/thanos-io/thanos/pkg/targets/targetspb" "github.com/thanos-io/thanos/pkg/tracing" @@ -736,7 +737,7 @@ func formatTime(t time.Time) string { return strconv.FormatFloat(float64(t.Unix())+float64(t.Nanosecond())/1e9, 'f', -1, 64) } -func (c *Client) get2xxResultWithGRPCErrors(ctx context.Context, spanName string, u *url.URL, data interface{}) error { +func (c *Client) get2xxResultWithGRPCErrors(ctx context.Context, spanName string, u *url.URL, data any) error { span, ctx := tracing.StartSpan(ctx, spanName) defer span.Finish() @@ -753,9 +754,9 @@ func (c *Client) get2xxResultWithGRPCErrors(ctx context.Context, spanName string } var m struct { - Data interface{} `json:"data"` - Status string `json:"status"` - Error string `json:"error"` + Data any `json:"data"` + Status string `json:"status"` + Error string `json:"error"` } if err = json.Unmarshal(body, &m); err != nil { @@ -954,3 +955,19 @@ func (c *Client) TargetsInGRPC(ctx context.Context, base *url.URL, stateTargets } return v.Data, c.get2xxResultWithGRPCErrors(ctx, "/prom_targets HTTP[client]", &u, &v) } + +func (c *Client) TSDBStatusInGRPC(ctx context.Context, base 
*url.URL, limit int) (*statuspb.TSDBStatisticsEntry, error) { + u := *base + u.Path = path.Join(u.Path, "/api/v1/status/tsdb") + + if limit > 0 { + q := u.Query() + q.Add("limit", strconv.Itoa(limit)) + u.RawQuery = q.Encode() + } + + var v struct { + Data *statuspb.TSDBStatisticsEntry `json:"data"` + } + return v.Data, c.get2xxResultWithGRPCErrors(ctx, "/prom_status_tsdb HTTP[client]", &u, &v) +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go b/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go index 2f023fb3d15..bbf2612972a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go @@ -29,6 +29,7 @@ import ( "github.com/thanos-io/thanos/pkg/metadata/metadatapb" "github.com/thanos-io/thanos/pkg/rules/rulespb" "github.com/thanos-io/thanos/pkg/runutil" + "github.com/thanos-io/thanos/pkg/status/statuspb" "github.com/thanos-io/thanos/pkg/store" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" @@ -220,6 +221,10 @@ type EndpointSet struct { endpointsMtx sync.RWMutex endpoints map[string]*endpointRef endpointsMetric *endpointSetNodeCollector + + // Track if the first update has completed + firstUpdateOnce sync.Once + firstUpdateChan chan struct{} } // nowFunc is a function that returns time.Time. @@ -264,7 +269,24 @@ func NewEndpointSet( } return res }, - endpoints: make(map[string]*endpointRef), + endpoints: make(map[string]*endpointRef), + firstUpdateChan: make(chan struct{}), + } +} + +// WaitForFirstUpdate blocks until the first endpoint update has completed. +// It returns immediately if the first update has already been done. +// The context can be used to set a timeout for waiting. +func (e *EndpointSet) WaitForFirstUpdate(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case _, ok := <-e.firstUpdateChan: + if !ok { + // Channel is closed, first update already completed + return nil + } + return nil } } @@ -285,7 +307,6 @@ func (e *EndpointSet) Update(ctx context.Context) { ) for _, spec := range e.endpointSpecs() { - spec := spec if er, existingRef := e.endpoints[spec.Addr()]; existingRef { wg.Add(1) @@ -373,6 +394,11 @@ func (e *EndpointSet) Update(ctx context.Context) { } e.endpointsMetric.Update(stats) + + // Signal that the first update has completed + e.firstUpdateOnce.Do(func() { + close(e.firstUpdateChan) + }) } func (e *EndpointSet) updateEndpoint(ctx context.Context, spec *GRPCEndpointSpec, er *endpointRef) { @@ -518,6 +544,19 @@ func (e *EndpointSet) GetExemplarsStores() []*exemplarspb.ExemplarStore { return exemplarStores } +// GetStatusClients returns a list of all active status clients. 
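+// Only endpoints whose most recent InfoResponse advertised the Status API are
+// included (HasStatusAPI below checks metadata.Status != nil), so callers can
+// use every returned client without re-checking capability.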
+func (e *EndpointSet) GetStatusClients() []statuspb.StatusClient { + endpoints := e.getQueryableRefs() + + statusClients := make([]statuspb.StatusClient, 0, len(endpoints)) + for _, er := range endpoints { + if er.HasStatusAPI() { + statusClients = append(statusClients, statuspb.NewStatusClient(er.cc)) + } + } + return statusClients +} + func (e *EndpointSet) Close() { e.endpointsMtx.Lock() defer e.endpointsMtx.Unlock() @@ -689,6 +728,13 @@ func (er *endpointRef) HasExemplarsAPI() bool { return er.metadata != nil && er.metadata.Exemplars != nil } +func (er *endpointRef) HasStatusAPI() bool { + er.mtx.RLock() + defer er.mtx.RUnlock() + + return er.metadata != nil && er.metadata.Status != nil +} + func (er *endpointRef) LabelSets() []labels.Labels { er.mtx.RLock() defer er.mtx.RUnlock() @@ -764,8 +810,8 @@ func (er *endpointRef) SupportsWithoutReplicaLabels() bool { func (er *endpointRef) String() string { mint, maxt := er.TimeRange() return fmt.Sprintf( - "Addr: %s LabelSets: %v MinTime: %d MaxTime: %d", - er.addr, labelpb.PromLabelSetsToString(er.LabelSets()), mint, maxt, + "Addr: %s MinTime: %d MaxTime: %d", + er.addr, mint, maxt, ) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/iter.go b/vendor/github.com/thanos-io/thanos/pkg/query/iter.go index 7bee002df20..e5276ec79bf 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/iter.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/iter.go @@ -218,14 +218,15 @@ type chunkSeriesIterator struct { chunks []chunkenc.Iterator i int lastVal chunkenc.ValueType + + cur chunkenc.Iterator } func newChunkSeriesIterator(cs []chunkenc.Iterator) chunkenc.Iterator { if len(cs) == 0 { - // This should not happen. StoreAPI implementations should not send empty results. - return errSeriesIterator{err: errors.Errorf("store returned an empty result")} + return errSeriesIterator{err: errors.New("got empty chunks")} } - return &chunkSeriesIterator{chunks: cs} + return &chunkSeriesIterator{chunks: cs, cur: cs[0]} } func (it *chunkSeriesIterator) Seek(t int64) chunkenc.ValueType { @@ -245,19 +246,19 @@ func (it *chunkSeriesIterator) Seek(t int64) chunkenc.ValueType { } func (it *chunkSeriesIterator) At() (t int64, v float64) { - return it.chunks[it.i].At() + return it.cur.At() } func (it *chunkSeriesIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) { - return it.chunks[it.i].AtHistogram(h) + return it.cur.AtHistogram(h) } func (it *chunkSeriesIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { - return it.chunks[it.i].AtFloatHistogram(fh) + return it.cur.AtFloatHistogram(fh) } func (it *chunkSeriesIterator) AtT() int64 { - return it.chunks[it.i].AtT() + return it.cur.AtT() } func (it *chunkSeriesIterator) Next() chunkenc.ValueType { @@ -276,11 +277,12 @@ func (it *chunkSeriesIterator) Next() chunkenc.ValueType { // Chunks are guaranteed to be ordered but not generally guaranteed to not overlap. // We must ensure to skip any overlapping range between adjacent chunks. 
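// Advancing cur together with i below is what lets the At, AtT, and Err
// accessors above read the cached iterator instead of re-indexing
// it.chunks[it.i] on every call.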
it.i++ + it.cur = it.chunks[it.i] return it.Seek(lastT + 1) } func (it *chunkSeriesIterator) Err() error { - return it.chunks[it.i].Err() + return it.cur.Err() } type lazySeriesSet struct { diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/querier.go b/vendor/github.com/thanos-io/thanos/pkg/query/querier.go index 66ca4d93bd6..0e2a798d96e 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/querier.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/querier.go @@ -173,10 +173,6 @@ func newQuerier( if logger == nil { logger = log.NewNopLogger() } - rl := make(map[string]struct{}) - for _, replicaLabel := range replicaLabels { - rl[replicaLabel] = struct{}{} - } partialResponseStrategy := storepb.PartialResponseStrategy_ABORT if partialResponse { diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go b/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go index 55017bed6f9..3fd4e45994b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go @@ -331,7 +331,7 @@ func (r *remoteQuery) Exec(ctx context.Context) *promql.Result { r.samplesStats = stats.NewQuerySamples(false) // Instant query. - if r.start == r.end { + if r.start.Equal(r.end) { request := &querypb.QueryRequest{ Query: r.plan.String(), QueryPlan: plan, diff --git a/vendor/github.com/thanos-io/thanos/pkg/querysharding/analyzer.go b/vendor/github.com/thanos-io/thanos/pkg/querysharding/analyzer.go index 80cb8cb3a8a..c7a1561b0db 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/querysharding/analyzer.go +++ b/vendor/github.com/thanos-io/thanos/pkg/querysharding/analyzer.go @@ -27,7 +27,7 @@ import ( ) var ( - notShardableErr = fmt.Errorf("expressions are not shardable") + errNotShardable = fmt.Errorf("expressions are not shardable") ) type Analyzer interface { @@ -104,13 +104,14 @@ func (a *QueryAnalyzer) Analyze(query string) (QueryAnalysis, error) { switch n := node.(type) { case *parser.Call: if n.Func != nil { - if n.Func.Name == "label_join" || n.Func.Name == "label_replace" { + switch n.Func.Name { + case "label_join", "label_replace": dstLabel := stringFromArg(n.Args[1]) dynamicLabels = append(dynamicLabels, dstLabel) - } else if n.Func.Name == "absent_over_time" || n.Func.Name == "absent" || n.Func.Name == "scalar" { + case "absent_over_time", "absent", "scalar": isShardable = false - return notShardableErr - } else if n.Func.Name == "histogram_quantile" { + return errNotShardable + case "histogram_quantile": analysis = analysis.scopeToLabels([]string{"le"}, false) } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go index a455a504b88..e11069352a6 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go +++ b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go @@ -124,7 +124,7 @@ func RetryWithLog(logger log.Logger, interval time.Duration, stopc <-chan struct } // CloseWithLogOnErr is making sure we log every error, even those from best effort tiny closers. -func CloseWithLogOnErr(logger log.Logger, closer io.Closer, format string, a ...interface{}) { +func CloseWithLogOnErr(logger log.Logger, closer io.Closer, format string, a ...any) { err := closer.Close() if err == nil { return @@ -143,7 +143,7 @@ func CloseWithLogOnErr(logger log.Logger, closer io.Closer, format string, a ... } // ExhaustCloseWithLogOnErr closes the io.ReadCloser with a log message on error but exhausts the reader before. 
-func ExhaustCloseWithLogOnErr(logger log.Logger, r io.ReadCloser, format string, a ...interface{}) { +func ExhaustCloseWithLogOnErr(logger log.Logger, r io.ReadCloser, format string, a ...any) { _, err := io.Copy(io.Discard, r) if err != nil { level.Warn(logger).Log("msg", "failed to exhaust reader, performance may be impeded", "err", err) @@ -153,7 +153,7 @@ func ExhaustCloseWithLogOnErr(logger log.Logger, r io.ReadCloser, format string, } // CloseWithErrCapture closes closer, wraps any error with message from fmt and args, and stores this in err. -func CloseWithErrCapture(err *error, closer io.Closer, format string, a ...interface{}) { +func CloseWithErrCapture(err *error, closer io.Closer, format string, a ...any) { merr := errutil.MultiError{} merr.Add(*err) @@ -163,7 +163,7 @@ func CloseWithErrCapture(err *error, closer io.Closer, format string, a ...inter } // ExhaustCloseWithErrCapture closes the io.ReadCloser with error capture but exhausts the reader before. -func ExhaustCloseWithErrCapture(err *error, r io.ReadCloser, format string, a ...interface{}) { +func ExhaustCloseWithErrCapture(err *error, r io.ReadCloser, format string, a ...any) { _, copyErr := io.Copy(io.Discard, r) CloseWithErrCapture(err, r, format, a...) diff --git a/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go b/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go index 79547561f3e..5fbabfa9900 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go +++ b/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go @@ -507,7 +507,7 @@ func (s *Shipper) blockMetasFromOldest() (metas []*metadata.Meta, failedBlocks [ metas = append(metas, m) } sort.Slice(metas, func(i, j int) bool { - return metas[i].BlockMeta.MinTime < metas[j].BlockMeta.MinTime + return metas[i].MinTime < metas[j].MinTime }) if len(failedBlocks) > 0 { diff --git a/vendor/github.com/thanos-io/thanos/pkg/status/statuspb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/status/statuspb/custom.go new file mode 100644 index 00000000000..0e183efd947 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/status/statuspb/custom.go @@ -0,0 +1,96 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package statuspb + +import ( + "cmp" + "maps" + "slices" + + v1 "github.com/prometheus/prometheus/web/api/v1" +) + +func NewTSDBStatisticsResponse(statistics *TSDBStatistics) *TSDBStatisticsResponse { + return &TSDBStatisticsResponse{ + Result: &TSDBStatisticsResponse_Statistics{ + Statistics: statistics, + }, + } +} + +func NewWarningTSDBStatisticsResponse(warning error) *TSDBStatisticsResponse { + return &TSDBStatisticsResponse{ + Result: &TSDBStatisticsResponse_Warning{ + Warning: warning.Error(), + }, + } +} + +// Merge merges the provided TSDBStatisticsEntry with the receiver. 
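+// Counter-like head statistics (NumSeries, NumLabelPairs, ChunkCount) are summed
+// and the head time range is widened to cover both entries; the per-name lists
+// are merged by name, adding values, except LabelValueCountByLabelName, which
+// keeps the maximum because the same label values may exist on several instances.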
+func (tse *TSDBStatisticsEntry) Merge(stats *TSDBStatisticsEntry) { + tse.HeadStatistics.NumSeries += stats.HeadStatistics.NumSeries + tse.HeadStatistics.NumLabelPairs += stats.HeadStatistics.NumLabelPairs + tse.HeadStatistics.ChunkCount += stats.HeadStatistics.ChunkCount + + if tse.HeadStatistics.MinTime <= 0 || tse.HeadStatistics.MinTime > stats.HeadStatistics.MinTime { + tse.HeadStatistics.MinTime = stats.HeadStatistics.MinTime + } + + if tse.HeadStatistics.MaxTime < stats.HeadStatistics.MaxTime { + tse.HeadStatistics.MaxTime = stats.HeadStatistics.MaxTime + } + + tse.SeriesCountByMetricName = mergeStatistics(tse.SeriesCountByMetricName, stats.SeriesCountByMetricName, addValue) + // The same label values may exist on different instances so it makes more + // sense to keep the max value rather than adding them all. + tse.LabelValueCountByLabelName = mergeStatistics(tse.LabelValueCountByLabelName, stats.LabelValueCountByLabelName, maxValue) + tse.MemoryInBytesByLabelName = mergeStatistics(tse.MemoryInBytesByLabelName, stats.MemoryInBytesByLabelName, addValue) + tse.SeriesCountByLabelValuePair = mergeStatistics(tse.SeriesCountByLabelValuePair, stats.SeriesCountByLabelValuePair, addValue) +} + +func addValue(a, b uint64) uint64 { + return a + b +} + +func maxValue(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func mergeStatistics(a, b []Statistic, mergeFunc func(uint64, uint64) uint64) []Statistic { + merged := make(map[string]Statistic, len(a)) + for _, stat := range a { + merged[stat.Name] = stat + } + + for _, stat := range b { + v, found := merged[stat.Name] + if !found { + merged[stat.Name] = stat + continue + } + v.Value = mergeFunc(v.Value, stat.Value) + merged[stat.Name] = v + } + + return slices.SortedStableFunc(maps.Values(merged), func(a, b Statistic) int { + // Descending sort. + return cmp.Compare(b.Value, a.Value) + }) +} + +// ConvertToPrometheusTSDBStat converts a protobuf Statistic slice to the equivalent Prometheus struct. +func ConvertToPrometheusTSDBStat(stats []Statistic) []v1.TSDBStat { + ret := make([]v1.TSDBStat, len(stats)) + for i := range stats { + ret[i] = v1.TSDBStat{ + Name: stats[i].Name, + Value: stats[i].Value, + } + } + + return ret +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/status/statuspb/rpc.pb.go b/vendor/github.com/thanos-io/thanos/pkg/status/statuspb/rpc.pb.go new file mode 100644 index 00000000000..ac7c8bdebc1 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/status/statuspb/rpc.pb.go @@ -0,0 +1,1949 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: status/statuspb/rpc.proto + +package statuspb + +import ( + context "context" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + storepb "github.com/thanos-io/thanos/pkg/store/storepb" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type TSDBStatisticsRequest struct { + Tenant string `protobuf:"bytes,1,opt,name=tenant,proto3" json:"tenant,omitempty"` + Limit int32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + PartialResponseStrategy storepb.PartialResponseStrategy `protobuf:"varint,3,opt,name=partial_response_strategy,json=partialResponseStrategy,proto3,enum=thanos.PartialResponseStrategy" json:"partial_response_strategy,omitempty"` +} + +func (m *TSDBStatisticsRequest) Reset() { *m = TSDBStatisticsRequest{} } +func (m *TSDBStatisticsRequest) String() string { return proto.CompactTextString(m) } +func (*TSDBStatisticsRequest) ProtoMessage() {} +func (*TSDBStatisticsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_d59a2444f79de84b, []int{0} +} +func (m *TSDBStatisticsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TSDBStatisticsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TSDBStatisticsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TSDBStatisticsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TSDBStatisticsRequest.Merge(m, src) +} +func (m *TSDBStatisticsRequest) XXX_Size() int { + return m.Size() +} +func (m *TSDBStatisticsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TSDBStatisticsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TSDBStatisticsRequest proto.InternalMessageInfo + +type TSDBStatisticsResponse struct { + // Types that are valid to be assigned to Result: + // *TSDBStatisticsResponse_Statistics + // *TSDBStatisticsResponse_Warning + Result isTSDBStatisticsResponse_Result `protobuf_oneof:"result"` +} + +func (m *TSDBStatisticsResponse) Reset() { *m = TSDBStatisticsResponse{} } +func (m *TSDBStatisticsResponse) String() string { return proto.CompactTextString(m) } +func (*TSDBStatisticsResponse) ProtoMessage() {} +func (*TSDBStatisticsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_d59a2444f79de84b, []int{1} +} +func (m *TSDBStatisticsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TSDBStatisticsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TSDBStatisticsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TSDBStatisticsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TSDBStatisticsResponse.Merge(m, src) +} +func (m *TSDBStatisticsResponse) XXX_Size() int { + return m.Size() +} +func (m *TSDBStatisticsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TSDBStatisticsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TSDBStatisticsResponse proto.InternalMessageInfo + +type isTSDBStatisticsResponse_Result interface { + isTSDBStatisticsResponse_Result() + MarshalTo([]byte) (int, error) + Size() int +} + +type TSDBStatisticsResponse_Statistics struct { + Statistics *TSDBStatistics `protobuf:"bytes,1,opt,name=statistics,proto3,oneof" json:"statistics,omitempty"` +} +type TSDBStatisticsResponse_Warning struct { + Warning string `protobuf:"bytes,2,opt,name=warning,proto3,oneof" json:"warning,omitempty"` +} + +func (*TSDBStatisticsResponse_Statistics) isTSDBStatisticsResponse_Result() {} +func 
(*TSDBStatisticsResponse_Warning) isTSDBStatisticsResponse_Result() {} + +func (m *TSDBStatisticsResponse) GetResult() isTSDBStatisticsResponse_Result { + if m != nil { + return m.Result + } + return nil +} + +func (m *TSDBStatisticsResponse) GetStatistics() *TSDBStatistics { + if x, ok := m.GetResult().(*TSDBStatisticsResponse_Statistics); ok { + return x.Statistics + } + return nil +} + +func (m *TSDBStatisticsResponse) GetWarning() string { + if x, ok := m.GetResult().(*TSDBStatisticsResponse_Warning); ok { + return x.Warning + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*TSDBStatisticsResponse) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*TSDBStatisticsResponse_Statistics)(nil), + (*TSDBStatisticsResponse_Warning)(nil), + } +} + +type TSDBStatistics struct { + Statistics map[string]*TSDBStatisticsEntry `protobuf:"bytes,1,rep,name=statistics,proto3" json:"statistics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *TSDBStatistics) Reset() { *m = TSDBStatistics{} } +func (m *TSDBStatistics) String() string { return proto.CompactTextString(m) } +func (*TSDBStatistics) ProtoMessage() {} +func (*TSDBStatistics) Descriptor() ([]byte, []int) { + return fileDescriptor_d59a2444f79de84b, []int{2} +} +func (m *TSDBStatistics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TSDBStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TSDBStatistics.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TSDBStatistics) XXX_Merge(src proto.Message) { + xxx_messageInfo_TSDBStatistics.Merge(m, src) +} +func (m *TSDBStatistics) XXX_Size() int { + return m.Size() +} +func (m *TSDBStatistics) XXX_DiscardUnknown() { + xxx_messageInfo_TSDBStatistics.DiscardUnknown(m) +} + +var xxx_messageInfo_TSDBStatistics proto.InternalMessageInfo + +type TSDBStatisticsEntry struct { + // Statistics from the TSDB head. 
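+ // The json:"headStats" tag mirrors the headStats object of Prometheus'
+ // /api/v1/status/tsdb response, which TSDBStatusInGRPC decodes into this type.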
+ HeadStatistics HeadStatistics `protobuf:"bytes,1,opt,name=head_statistics,json=headStatistics,proto3" json:"headStats"` + SeriesCountByMetricName []Statistic `protobuf:"bytes,2,rep,name=series_count_by_metric_name,json=seriesCountByMetricName,proto3" json:"seriesCountByMetricName"` + LabelValueCountByLabelName []Statistic `protobuf:"bytes,3,rep,name=label_value_count_by_label_name,json=labelValueCountByLabelName,proto3" json:"labelValueCountByLabelName"` + MemoryInBytesByLabelName []Statistic `protobuf:"bytes,4,rep,name=memory_in_bytes_by_label_name,json=memoryInBytesByLabelName,proto3" json:"memoryInBytesByLabelName"` + SeriesCountByLabelValuePair []Statistic `protobuf:"bytes,5,rep,name=series_count_by_label_value_pair,json=seriesCountByLabelValuePair,proto3" json:"seriesCountByLabelValuePair"` +} + +func (m *TSDBStatisticsEntry) Reset() { *m = TSDBStatisticsEntry{} } +func (m *TSDBStatisticsEntry) String() string { return proto.CompactTextString(m) } +func (*TSDBStatisticsEntry) ProtoMessage() {} +func (*TSDBStatisticsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_d59a2444f79de84b, []int{3} +} +func (m *TSDBStatisticsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TSDBStatisticsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TSDBStatisticsEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TSDBStatisticsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_TSDBStatisticsEntry.Merge(m, src) +} +func (m *TSDBStatisticsEntry) XXX_Size() int { + return m.Size() +} +func (m *TSDBStatisticsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_TSDBStatisticsEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_TSDBStatisticsEntry proto.InternalMessageInfo + +type HeadStatistics struct { + // Total number of series. + NumSeries uint64 `protobuf:"varint,1,opt,name=num_series,json=numSeries,proto3" json:"numSeries"` + // Total number of label pairs. + NumLabelPairs int64 `protobuf:"varint,2,opt,name=num_label_pairs,json=numLabelPairs,proto3" json:"numLabelPairs"` + // Total number of chunks (not implemented yet). + ChunkCount int64 `protobuf:"varint,3,opt,name=chunk_count,json=chunkCount,proto3" json:"chunkCount"` + // Minimum timestamp. + MinTime int64 `protobuf:"varint,4,opt,name=min_time,json=minTime,proto3" json:"minTime"` + // Maximum timestamp. 
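+ // Like MinTime, this is presumably a Unix timestamp in milliseconds, per the
+ // Prometheus headStats convention.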
+ MaxTime int64 `protobuf:"varint,5,opt,name=max_time,json=maxTime,proto3" json:"maxTime"` +} + +func (m *HeadStatistics) Reset() { *m = HeadStatistics{} } +func (m *HeadStatistics) String() string { return proto.CompactTextString(m) } +func (*HeadStatistics) ProtoMessage() {} +func (*HeadStatistics) Descriptor() ([]byte, []int) { + return fileDescriptor_d59a2444f79de84b, []int{4} +} +func (m *HeadStatistics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HeadStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HeadStatistics.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HeadStatistics) XXX_Merge(src proto.Message) { + xxx_messageInfo_HeadStatistics.Merge(m, src) +} +func (m *HeadStatistics) XXX_Size() int { + return m.Size() +} +func (m *HeadStatistics) XXX_DiscardUnknown() { + xxx_messageInfo_HeadStatistics.DiscardUnknown(m) +} + +var xxx_messageInfo_HeadStatistics proto.InternalMessageInfo + +type Statistic struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value uint64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Statistic) Reset() { *m = Statistic{} } +func (m *Statistic) String() string { return proto.CompactTextString(m) } +func (*Statistic) ProtoMessage() {} +func (*Statistic) Descriptor() ([]byte, []int) { + return fileDescriptor_d59a2444f79de84b, []int{5} +} +func (m *Statistic) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Statistic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Statistic.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Statistic) XXX_Merge(src proto.Message) { + xxx_messageInfo_Statistic.Merge(m, src) +} +func (m *Statistic) XXX_Size() int { + return m.Size() +} +func (m *Statistic) XXX_DiscardUnknown() { + xxx_messageInfo_Statistic.DiscardUnknown(m) +} + +var xxx_messageInfo_Statistic proto.InternalMessageInfo + +func init() { + proto.RegisterType((*TSDBStatisticsRequest)(nil), "thanos.TSDBStatisticsRequest") + proto.RegisterType((*TSDBStatisticsResponse)(nil), "thanos.TSDBStatisticsResponse") + proto.RegisterType((*TSDBStatistics)(nil), "thanos.TSDBStatistics") + proto.RegisterMapType((map[string]*TSDBStatisticsEntry)(nil), "thanos.TSDBStatistics.StatisticsEntry") + proto.RegisterType((*TSDBStatisticsEntry)(nil), "thanos.TSDBStatisticsEntry") + proto.RegisterType((*HeadStatistics)(nil), "thanos.HeadStatistics") + proto.RegisterType((*Statistic)(nil), "thanos.Statistic") +} + +func init() { proto.RegisterFile("status/statuspb/rpc.proto", fileDescriptor_d59a2444f79de84b) } + +var fileDescriptor_d59a2444f79de84b = []byte{ + // 733 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x55, 0xcd, 0x4e, 0x1b, 0x49, + 0x10, 0xf6, 0xe0, 0x1f, 0x70, 0x59, 0x98, 0xa5, 0x97, 0x05, 0x63, 0xc4, 0x8c, 0xe5, 0x95, 0x90, + 0x0f, 0x2b, 0x7b, 0xe3, 0x28, 0x12, 0xc9, 0x71, 0xf2, 0x23, 0x22, 0x91, 0x88, 0xb4, 0x51, 0x0e, + 0x70, 0x18, 0xb5, 0x4d, 0xcb, 0x6e, 0xe1, 0xe9, 0x99, 0x4c, 0xf7, 0x24, 0x4c, 0xa4, 0xbc, 0x43, + 0x9e, 0x21, 0xcf, 0x90, 0x87, 0xe0, 0x90, 0x03, 0xc7, 0x9c, 0xac, 0x04, 0x6e, 0x7e, 0x84, 0x9c, + 0xa2, 0xe9, 0x1e, 0xff, 
0xc6, 0xe6, 0x02, 0xd5, 0x55, 0x5f, 0xd5, 0xf7, 0x55, 0x4d, 0x77, 0x19, + 0x76, 0x85, 0x24, 0x32, 0x14, 0x0d, 0xfd, 0xcf, 0x6f, 0x37, 0x02, 0xbf, 0x53, 0xf7, 0x03, 0x4f, + 0x7a, 0x28, 0x27, 0x7b, 0x84, 0x7b, 0xa2, 0xbc, 0x2b, 0xa4, 0x17, 0xd0, 0x86, 0xfa, 0xeb, 0xb7, + 0x1b, 0x32, 0xf2, 0xa9, 0xd0, 0x90, 0xf2, 0x56, 0xd7, 0xeb, 0x7a, 0xca, 0x6c, 0xc4, 0x96, 0xf6, + 0x56, 0xbf, 0x18, 0xf0, 0xcf, 0x69, 0xeb, 0x99, 0xdd, 0x92, 0x44, 0x32, 0x21, 0x59, 0x47, 0x60, + 0xfa, 0x2e, 0xa4, 0x42, 0xa2, 0x6d, 0xc8, 0x49, 0xca, 0x09, 0x97, 0x25, 0xa3, 0x62, 0xd4, 0xf2, + 0x38, 0x39, 0xa1, 0x2d, 0xc8, 0xf6, 0x99, 0xcb, 0x64, 0x69, 0xa5, 0x62, 0xd4, 0xb2, 0x58, 0x1f, + 0xd0, 0x39, 0xec, 0xfa, 0x24, 0x90, 0x8c, 0xf4, 0x9d, 0x80, 0x0a, 0xdf, 0xe3, 0x82, 0x3a, 0x42, + 0x06, 0x44, 0xd2, 0x6e, 0x54, 0x4a, 0x57, 0x8c, 0x5a, 0xb1, 0x69, 0xd5, 0xb5, 0xc8, 0xfa, 0x89, + 0x06, 0xe2, 0x04, 0xd7, 0x4a, 0x60, 0x78, 0xc7, 0x5f, 0x1c, 0xa8, 0x5e, 0xc1, 0xf6, 0xbc, 0x46, + 0x8d, 0x40, 0x87, 0x00, 0x62, 0xec, 0x55, 0x42, 0x0b, 0xcd, 0xed, 0x11, 0xcf, 0x6c, 0xce, 0x51, + 0x0a, 0x4f, 0x61, 0x51, 0x19, 0x56, 0x3f, 0x90, 0x80, 0x33, 0xde, 0x55, 0x8d, 0xe4, 0x8f, 0x52, + 0x78, 0xe4, 0xb0, 0xd7, 0x20, 0x17, 0x50, 0x11, 0xf6, 0x65, 0xf5, 0xab, 0x01, 0xc5, 0xd9, 0x32, + 0xe8, 0xc5, 0x1c, 0x65, 0xba, 0x56, 0x68, 0x1e, 0x2c, 0xa6, 0xac, 0x4f, 0xcc, 0xe7, 0x5c, 0x06, + 0xd1, 0xb4, 0x80, 0xf2, 0x19, 0x6c, 0xcc, 0x85, 0xd1, 0x5f, 0x90, 0xbe, 0xa4, 0x51, 0x32, 0xef, + 0xd8, 0x44, 0x0f, 0x20, 0xfb, 0x9e, 0xf4, 0x43, 0xaa, 0x34, 0x16, 0x9a, 0x7b, 0x8b, 0x79, 0x74, + 0x71, 0x8d, 0x7c, 0xb2, 0x72, 0x68, 0x54, 0xbf, 0x65, 0xe0, 0xef, 0x05, 0x10, 0x84, 0x61, 0xa3, + 0x47, 0xc9, 0x85, 0xb3, 0x7c, 0x66, 0x47, 0x94, 0x5c, 0x4c, 0xb2, 0xec, 0xcd, 0xeb, 0x81, 0x95, + 0x1a, 0x0e, 0xac, 0x7c, 0x2f, 0xf1, 0x0b, 0x5c, 0xec, 0xcd, 0x40, 0x90, 0x0f, 0x7b, 0x82, 0x06, + 0x8c, 0x0a, 0xa7, 0xe3, 0x85, 0x5c, 0x3a, 0xed, 0xc8, 0x71, 0xa9, 0x0c, 0x58, 0xc7, 0xe1, 0xc4, + 0x8d, 0x85, 0xc7, 0x03, 0xda, 0x1c, 0xd5, 0x1f, 0x27, 0xda, 0x56, 0x52, 0x7a, 0x47, 0x67, 0x3f, + 0x8d, 0x93, 0xed, 0xe8, 0x95, 0x4a, 0x7d, 0x4d, 0x5c, 0x8a, 0x97, 0x05, 0xd0, 0x47, 0xb0, 0xfa, + 0xa4, 0x4d, 0xfb, 0x8e, 0x6a, 0x78, 0x42, 0xab, 0x9d, 0x8a, 0x35, 0xbd, 0x8c, 0xb5, 0x9a, 0xb0, + 0x96, 0x15, 0xf8, 0x6d, 0x5c, 0x20, 0x21, 0x38, 0x8e, 0x1d, 0x8a, 0xf8, 0x9e, 0x18, 0x92, 0xb0, + 0xef, 0x52, 0xd7, 0x0b, 0x22, 0x87, 0x71, 0xa7, 0x1d, 0x49, 0x2a, 0xe6, 0x98, 0x33, 0xcb, 0x98, + 0x2b, 0x09, 0x73, 0x49, 0xe7, 0xbf, 0xe4, 0x76, 0x9c, 0x3d, 0xcd, 0xbb, 0x34, 0x82, 0x3e, 0x41, + 0x65, 0x7e, 0xc6, 0xd3, 0x13, 0xf0, 0x09, 0x0b, 0x4a, 0xd9, 0x65, 0xc4, 0xff, 0x26, 0xc4, 0x7b, + 0x33, 0xf3, 0x3c, 0x1e, 0xf7, 0x78, 0x42, 0x58, 0x80, 0xef, 0x0b, 0x56, 0x7f, 0x19, 0x50, 0x9c, + 0xbd, 0x18, 0xe8, 0x3f, 0x00, 0x1e, 0xba, 0x8e, 0xce, 0x52, 0x97, 0x28, 0x63, 0xaf, 0xc7, 0x17, + 0x85, 0x87, 0x6e, 0x4b, 0x39, 0xf1, 0xc4, 0x44, 0x8f, 0x61, 0x23, 0x46, 0x6b, 0xcd, 0xb1, 0x5a, + 0xa1, 0x2e, 0x74, 0xda, 0xde, 0x1c, 0x0e, 0xac, 0x75, 0x1e, 0xba, 0x8a, 0x30, 0xe6, 0x12, 0x78, + 0xf6, 0x88, 0x1a, 0x50, 0xe8, 0xf4, 0x42, 0x7e, 0xa9, 0x3b, 0x57, 0xab, 0x24, 0x6d, 0x17, 0x87, + 0x03, 0x0b, 0x94, 0x5b, 0x09, 0xc6, 0x53, 0x36, 0x3a, 0x80, 0x35, 0x97, 0x71, 0x47, 0x32, 0xf5, + 0x31, 0x62, 0x74, 0x61, 0x38, 0xb0, 0x56, 0x5d, 0xc6, 0x4f, 0x99, 0x4b, 0xf1, 0xc8, 0x50, 0x38, + 0x72, 0xa5, 0x71, 0xd9, 0x29, 0x1c, 0xb9, 0x4a, 0x70, 0xda, 0xa8, 0x3e, 0x82, 0xfc, 0xb8, 0x6f, + 0x84, 0x20, 0xa3, 0xbe, 0xb2, 0x7e, 0xa2, 0xca, 0x8e, 0x17, 0xe2, 0xe4, 0x8d, 0x66, 0x92, 0x67, + 0xd8, 0x3c, 0x87, 0x5c, 0x4b, 0xed, 0x69, 0xf4, 
0xe6, 0x8f, 0x15, 0xb2, 0xbf, 0xf8, 0x19, 0x27, + 0x9b, 0xb7, 0x6c, 0x2e, 0x0b, 0xeb, 0xa5, 0xf7, 0xbf, 0x61, 0x1f, 0x5c, 0xff, 0x34, 0x53, 0xd7, + 0xb7, 0xa6, 0x71, 0x73, 0x6b, 0x1a, 0x3f, 0x6e, 0x4d, 0xe3, 0xf3, 0x9d, 0x99, 0xba, 0xb9, 0x33, + 0x53, 0xdf, 0xef, 0xcc, 0xd4, 0xd9, 0xda, 0xe8, 0x07, 0xa2, 0x9d, 0x53, 0x4b, 0xfe, 0xe1, 0xef, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x12, 0x23, 0x65, 0x33, 0x3a, 0x06, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// StatusClient is the client API for Status service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type StatusClient interface { + // TSDBStatistics has information for all TSDBs. + TSDBStatistics(ctx context.Context, in *TSDBStatisticsRequest, opts ...grpc.CallOption) (Status_TSDBStatisticsClient, error) +} + +type statusClient struct { + cc *grpc.ClientConn +} + +func NewStatusClient(cc *grpc.ClientConn) StatusClient { + return &statusClient{cc} +} + +func (c *statusClient) TSDBStatistics(ctx context.Context, in *TSDBStatisticsRequest, opts ...grpc.CallOption) (Status_TSDBStatisticsClient, error) { + stream, err := c.cc.NewStream(ctx, &_Status_serviceDesc.Streams[0], "/thanos.Status/TSDBStatistics", opts...) + if err != nil { + return nil, err + } + x := &statusTSDBStatisticsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Status_TSDBStatisticsClient interface { + Recv() (*TSDBStatisticsResponse, error) + grpc.ClientStream +} + +type statusTSDBStatisticsClient struct { + grpc.ClientStream +} + +func (x *statusTSDBStatisticsClient) Recv() (*TSDBStatisticsResponse, error) { + m := new(TSDBStatisticsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// StatusServer is the server API for Status service. +type StatusServer interface { + // TSDBStatistics has information for all TSDBs. + TSDBStatistics(*TSDBStatisticsRequest, Status_TSDBStatisticsServer) error +} + +// UnimplementedStatusServer can be embedded to have forward compatible implementations. 
+type UnimplementedStatusServer struct { +} + +func (*UnimplementedStatusServer) TSDBStatistics(req *TSDBStatisticsRequest, srv Status_TSDBStatisticsServer) error { + return status.Errorf(codes.Unimplemented, "method TSDBStatistics not implemented") +} + +func RegisterStatusServer(s *grpc.Server, srv StatusServer) { + s.RegisterService(&_Status_serviceDesc, srv) +} + +func _Status_TSDBStatistics_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(TSDBStatisticsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StatusServer).TSDBStatistics(m, &statusTSDBStatisticsServer{stream}) +} + +type Status_TSDBStatisticsServer interface { + Send(*TSDBStatisticsResponse) error + grpc.ServerStream +} + +type statusTSDBStatisticsServer struct { + grpc.ServerStream +} + +func (x *statusTSDBStatisticsServer) Send(m *TSDBStatisticsResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _Status_serviceDesc = grpc.ServiceDesc{ + ServiceName: "thanos.Status", + HandlerType: (*StatusServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "TSDBStatistics", + Handler: _Status_TSDBStatistics_Handler, + ServerStreams: true, + }, + }, + Metadata: "status/statuspb/rpc.proto", +} + +func (m *TSDBStatisticsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TSDBStatisticsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TSDBStatisticsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PartialResponseStrategy != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.PartialResponseStrategy)) + i-- + dAtA[i] = 0x18 + } + if m.Limit != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x10 + } + if len(m.Tenant) > 0 { + i -= len(m.Tenant) + copy(dAtA[i:], m.Tenant) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Tenant))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TSDBStatisticsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TSDBStatisticsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TSDBStatisticsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Result != nil { + { + size := m.Result.Size() + i -= size + if _, err := m.Result.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *TSDBStatisticsResponse_Statistics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TSDBStatisticsResponse_Statistics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Statistics != nil { + { + size, err := m.Statistics.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *TSDBStatisticsResponse_Warning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*TSDBStatisticsResponse_Warning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Warning) + copy(dAtA[i:], m.Warning) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Warning))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *TSDBStatistics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TSDBStatistics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TSDBStatistics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Statistics) > 0 { + for k := range m.Statistics { + v := m.Statistics[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintRpc(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintRpc(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TSDBStatisticsEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TSDBStatisticsEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TSDBStatisticsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SeriesCountByLabelValuePair) > 0 { + for iNdEx := len(m.SeriesCountByLabelValuePair) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SeriesCountByLabelValuePair[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.MemoryInBytesByLabelName) > 0 { + for iNdEx := len(m.MemoryInBytesByLabelName) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MemoryInBytesByLabelName[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.LabelValueCountByLabelName) > 0 { + for iNdEx := len(m.LabelValueCountByLabelName) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LabelValueCountByLabelName[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.SeriesCountByMetricName) > 0 { + for iNdEx := len(m.SeriesCountByMetricName) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SeriesCountByMetricName[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.HeadStatistics.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HeadStatistics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeadStatistics) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HeadStatistics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxTime != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MaxTime)) + i-- + dAtA[i] = 0x28 + } + if m.MinTime != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MinTime)) + i-- + dAtA[i] = 0x20 + } + if m.ChunkCount != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.ChunkCount)) + i-- + dAtA[i] = 0x18 + } + if m.NumLabelPairs != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.NumLabelPairs)) + i-- + dAtA[i] = 0x10 + } + if m.NumSeries != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.NumSeries)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Statistic) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Statistic) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Statistic) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { + offset -= sovRpc(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *TSDBStatisticsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tenant) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovRpc(uint64(m.Limit)) + } + if m.PartialResponseStrategy != 0 { + n += 1 + sovRpc(uint64(m.PartialResponseStrategy)) + } + return n +} + +func (m *TSDBStatisticsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + n += m.Result.Size() + } + return n +} + +func (m *TSDBStatisticsResponse_Statistics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Statistics != nil { + l = m.Statistics.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *TSDBStatisticsResponse_Warning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Warning) + n += 1 + l + sovRpc(uint64(l)) + return n +} +func (m *TSDBStatistics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Statistics) > 0 { + for k, v := range m.Statistics { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovRpc(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovRpc(uint64(len(k))) + l + n += mapEntrySize + 1 + sovRpc(uint64(mapEntrySize)) + } + } + return n +} + +func (m *TSDBStatisticsEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.HeadStatistics.Size() + n += 1 + l + sovRpc(uint64(l)) + if len(m.SeriesCountByMetricName) > 0 { + for _, e := range m.SeriesCountByMetricName { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.LabelValueCountByLabelName) > 0 { + for _, e := range m.LabelValueCountByLabelName { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.MemoryInBytesByLabelName) > 0 { + for _, e := range 
m.MemoryInBytesByLabelName { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.SeriesCountByLabelValuePair) > 0 { + for _, e := range m.SeriesCountByLabelValuePair { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *HeadStatistics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NumSeries != 0 { + n += 1 + sovRpc(uint64(m.NumSeries)) + } + if m.NumLabelPairs != 0 { + n += 1 + sovRpc(uint64(m.NumLabelPairs)) + } + if m.ChunkCount != 0 { + n += 1 + sovRpc(uint64(m.ChunkCount)) + } + if m.MinTime != 0 { + n += 1 + sovRpc(uint64(m.MinTime)) + } + if m.MaxTime != 0 { + n += 1 + sovRpc(uint64(m.MaxTime)) + } + return n +} + +func (m *Statistic) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Value != 0 { + n += 1 + sovRpc(uint64(m.Value)) + } + return n +} + +func sovRpc(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRpc(x uint64) (n int) { + return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *TSDBStatisticsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TSDBStatisticsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TSDBStatisticsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tenant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PartialResponseStrategy", wireType) + } + m.PartialResponseStrategy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PartialResponseStrategy |= storepb.PartialResponseStrategy(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + 
if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TSDBStatisticsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TSDBStatisticsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TSDBStatisticsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statistics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TSDBStatistics{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Result = &TSDBStatisticsResponse_Statistics{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Result = &TSDBStatisticsResponse_Warning{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TSDBStatistics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TSDBStatistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TSDBStatistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statistics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Statistics == nil { + m.Statistics = make(map[string]*TSDBStatisticsEntry) + } + var mapkey string + var mapvalue *TSDBStatisticsEntry + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthRpc + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthRpc + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthRpc + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthRpc + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &TSDBStatisticsEntry{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Statistics[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TSDBStatisticsEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TSDBStatisticsEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TSDBStatisticsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HeadStatistics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.HeadStatistics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeriesCountByMetricName", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SeriesCountByMetricName = append(m.SeriesCountByMetricName, Statistic{}) + if err := m.SeriesCountByMetricName[len(m.SeriesCountByMetricName)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelValueCountByLabelName", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelValueCountByLabelName = append(m.LabelValueCountByLabelName, Statistic{}) + if err := m.LabelValueCountByLabelName[len(m.LabelValueCountByLabelName)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryInBytesByLabelName", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MemoryInBytesByLabelName = append(m.MemoryInBytesByLabelName, Statistic{}) + if err := m.MemoryInBytesByLabelName[len(m.MemoryInBytesByLabelName)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeriesCountByLabelValuePair", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SeriesCountByLabelValuePair = 
append(m.SeriesCountByLabelValuePair, Statistic{}) + if err := m.SeriesCountByLabelValuePair[len(m.SeriesCountByLabelValuePair)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HeadStatistics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeadStatistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeadStatistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumSeries", wireType) + } + m.NumSeries = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumSeries |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumLabelPairs", wireType) + } + m.NumLabelPairs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumLabelPairs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkCount", wireType) + } + m.ChunkCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ChunkCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTime", wireType) + } + m.MinTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTime", wireType) + } + m.MaxTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Statistic) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Statistic: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Statistic: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRpc(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRpc + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupRpc + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthRpc + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRpc = fmt.Errorf("proto: unexpected end of group") +) diff --git 
a/vendor/github.com/thanos-io/thanos/pkg/status/statuspb/rpc.proto b/vendor/github.com/thanos-io/thanos/pkg/status/statuspb/rpc.proto
new file mode 100644
index 00000000000..a7d26f8607e
--- /dev/null
+++ b/vendor/github.com/thanos-io/thanos/pkg/status/statuspb/rpc.proto
@@ -0,0 +1,81 @@
+// Copyright (c) The Thanos Authors.
+// Licensed under the Apache License 2.0.
+
+syntax = "proto3";
+package thanos;
+
+import "store/storepb/types.proto";
+//import "store/labelpb/types.proto";
+import "gogoproto/gogo.proto";
+//import "google/protobuf/timestamp.proto";
+
+option go_package = "statuspb";
+
+option (gogoproto.sizer_all) = true;
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+// Do not generate XXX fields to reduce memory footprint and opening a door
+// for zero-copy casts to/from prometheus data types.
+option (gogoproto.goproto_unkeyed_all) = false;
+option (gogoproto.goproto_unrecognized_all) = false;
+option (gogoproto.goproto_sizecache_all) = false;
+
+// Status represents the API responsible for gathering status information.
+service Status {
+  // TSDBStatistics has information for all TSDBs.
+  rpc TSDBStatistics(TSDBStatisticsRequest) returns (stream TSDBStatisticsResponse);
+}
+
+message TSDBStatisticsRequest {
+  string tenant = 1;
+  int32 limit = 2;
+  PartialResponseStrategy partial_response_strategy = 3;
+}
+
+message TSDBStatisticsResponse {
+  oneof result {
+    TSDBStatistics statistics = 1;
+
+    // warning is used to warn status API users about suspicious cases or
+    // partial response (if enabled).
+    string warning = 2;
+  }
+}
+
+message TSDBStatistics {
+  map<string, TSDBStatisticsEntry> statistics = 1;
+}
+
+message TSDBStatisticsEntry {
+  // Statistics from the TSDB head.
+  HeadStatistics head_statistics = 1 [(gogoproto.jsontag) = "headStats", (gogoproto.nullable) = false];
+
+  repeated Statistic series_count_by_metric_name = 2 [(gogoproto.jsontag) = "seriesCountByMetricName", (gogoproto.nullable) = false];
+  repeated Statistic label_value_count_by_label_name = 3 [(gogoproto.jsontag) = "labelValueCountByLabelName", (gogoproto.nullable) = false];
+  repeated Statistic memory_in_bytes_by_label_name = 4 [(gogoproto.jsontag) = "memoryInBytesByLabelName", (gogoproto.nullable) = false];
+  repeated Statistic series_count_by_label_value_pair = 5 [(gogoproto.jsontag) = "seriesCountByLabelValuePair", (gogoproto.nullable) = false];
+}
+
+message HeadStatistics {
+  // Total number of series.
+  uint64 num_series = 1 [(gogoproto.jsontag) = "numSeries" ];
+
+  // Total number of label pairs.
+  int64 num_label_pairs = 2 [(gogoproto.jsontag) = "numLabelPairs" ];
+
+  // Total number of chunks (not implemented yet).
+  int64 chunk_count = 3 [(gogoproto.jsontag) = "chunkCount" ];
+
+  // Minimum timestamp.
+  int64 min_time = 4 [(gogoproto.jsontag) = "minTime" ];
+
+  // Maximum timestamp.
+ int64 max_time = 5 [(gogoproto.jsontag) = "maxTime" ]; +} + +message Statistic { + string name = 1; + uint64 value = 2; +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index a2095d61e38..fb8b8f1c648 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -126,7 +126,7 @@ const ( var ( errBlockSyncConcurrencyNotValid = errors.New("the block sync concurrency must be equal or greater than 1.") - hashPool = sync.Pool{New: func() interface{} { return xxhash.New() }} + hashPool = sync.Pool{New: func() any { return xxhash.New() }} postingsPool zeropool.Pool[[]storage.SeriesRef] ) @@ -668,7 +668,7 @@ func NewBucketStore( dir: dir, indexCache: noopCache{}, matcherCache: storecache.NoopMatchersCache, - buffers: sync.Pool{New: func() interface{} { + buffers: sync.Pool{New: func() any { b := make([]byte, 0, initialBufSize) return &b }}, @@ -744,8 +744,7 @@ func (s *BucketStore) SyncBlocks(ctx context.Context) error { blockc := make(chan *metadata.Meta) for i := 0; i < s.blockSyncConcurrency; i++ { - wg.Add(1) - go func() { + wg.Go(func() { for meta := range blockc { if preAddErr := s.blockLifecycleCallback.PreAdd(*meta); preAddErr != nil { continue @@ -754,8 +753,7 @@ func (s *BucketStore) SyncBlocks(ctx context.Context) error { continue } } - wg.Done() - }() + }) } for id, meta := range metas { @@ -1294,10 +1292,7 @@ func (b *blockSeriesClient) Recv() (*storepb.SeriesResponse, error) { func (b *blockSeriesClient) nextBatch(tenant string) error { start := b.i - end := start + uint64(b.batchSize) - if end > uint64(len(b.lazyPostings.postings)) { - end = uint64(len(b.lazyPostings.postings)) - } + end := min(start+uint64(b.batchSize), uint64(len(b.lazyPostings.postings))) b.i = end lazyExpandedPosting := b.lazyPostings.lazyExpanded() @@ -1334,7 +1329,7 @@ func (b *blockSeriesClient) nextBatch(tenant string) error { seriesMatched := 0 b.entries = b.entries[:0] OUTER: - for i := 0; i < len(postingsBatch); i++ { + for i := range postingsBatch { if err := b.ctx.Err(); err != nil { return err } @@ -1919,7 +1914,6 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq var logger = s.requestLoggerFunc(ctx, s.logger) for _, b := range s.blocks { - b := b gctx := gctx if !b.overlapsClosedInterval(req.Start, req.End) { @@ -2100,10 +2094,8 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR if err != nil { return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "translate request labels matchers").Error()) } - for i := range req.WithoutReplicaLabels { - if req.Label == req.WithoutReplicaLabels[i] { - return &storepb.LabelValuesResponse{}, nil - } + if slices.Contains(req.WithoutReplicaLabels, req.Label) { + return &storepb.LabelValuesResponse{}, nil } tenant, _ := tenancy.GetTenantFromGRPCMetadata(ctx) @@ -2144,7 +2136,6 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR var stats = &queryStats{} for _, b := range s.blocks { - b := b if !b.overlapsClosedInterval(req.Start, req.End) { continue @@ -2914,7 +2905,6 @@ func checkNilPosting(name, value string, p index.Postings) index.Postings { func matchersToPostingGroups(ctx context.Context, lvalsFn func(name string) ([]string, error), ms []*labels.Matcher) ([]*postingGroup, error) { matchersMap := make(map[string]map[string]*labels.Matcher) for _, m := range ms { - m := m if _, ok := matchersMap[m.Name]; !ok { 
matchersMap[m.Name] = make(map[string]*labels.Matcher)
 		}
@@ -3822,7 +3812,7 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, a
 		n, err = io.ReadFull(bufReader, cb)
 		readOffset += n
 		// Unexpected EOF for last chunk could be a valid case. Any other errors are definitely real.
-		if err != nil && !(errors.Is(err, io.ErrUnexpectedEOF) && i == len(pIdxs)-1) {
+		if err != nil && (!errors.Is(err, io.ErrUnexpectedEOF) || i != len(pIdxs)-1) {
 			return errors.Wrapf(err, "read range for seq %d offset %x", seq, pIdx.offset)
 		}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go
index 3cdca83ceda..0078cad36ba 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go
@@ -100,7 +100,7 @@ func NewCommonMetrics(reg prometheus.Registerer) *CommonMetrics {
 // CacheKey defines cache key used in index cache.
 type CacheKey struct {
 	Block       string
-	Key         interface{}
+	Key         any
 	Compression string
 }
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/caching_bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/caching_bucket.go
index 11faa03d6a6..7d4b704db57 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/caching_bucket.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/caching_bucket.go
@@ -364,10 +364,7 @@ func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset
 	totalRequestedBytes := int64(0)
 	for off := startRange; off < endRange; off += cfg.SubrangeSize {
-		end := off + cfg.SubrangeSize
-		if end > attrs.Size {
-			end = attrs.Size
-		}
+		end := min(off+cfg.SubrangeSize, attrs.Size)
 		totalRequestedBytes += (end - off)
 		objectSubrange := cachekey.BucketCacheKey{Verb: cachekey.SubrangeVerb, Name: name, Start: off, End: end}
 		k := objectSubrange.String()
@@ -425,7 +422,6 @@ func (cb *CachingBucket) fetchMissingSubranges(ctx context.Context, name string,
 	// Run parallel queries for each missing range. Fetched data is stored into 'hits' map, protected by hitsMutex.
 	g, gctx := errgroup.WithContext(ctx)
 	for _, m := range missing {
-		m := m
 		g.Go(func() error {
 			r, err := cb.Bucket.GetRange(gctx, name, m.start, m.end-m.start)
 			if err != nil {
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/caching_bucket_factory.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/caching_bucket_factory.go
index 99db3132b92..fb287b40845 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/caching_bucket_factory.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/caching_bucket_factory.go
@@ -38,7 +38,7 @@ const (
 // CachingWithBackendConfig is a configuration of caching bucket used by Store component.
 type CachingWithBackendConfig struct {
 	Type          BucketCacheProvider `yaml:"type"`
-	BackendConfig interface{}         `yaml:"config"`
+	BackendConfig any                 `yaml:"config"`

 	// Basic unit used to cache chunks.
 	ChunkSubrangeSize int64 `yaml:"chunk_subrange_size"`
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/factory.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/factory.go
index 78279e7b2d0..27c19f8cbde 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/factory.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/factory.go
@@ -28,7 +28,7 @@ const (
 // IndexCacheConfig specifies the index cache config.
type IndexCacheConfig struct { Type IndexCacheProvider `yaml:"type"` - Config interface{} `yaml:"config"` + Config any `yaml:"config"` // Available item types are Postings, Series and ExpandedPostings. EnabledItems []string `yaml:"enabled_items"` diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/matchers_cache.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/matchers_cache.go index e3b18603a6f..1a4e00e6f59 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/matchers_cache.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/matchers_cache.go @@ -118,7 +118,7 @@ func (c *LruMatchersCache) GetOrSet(m ConversionLabelMatcher, newItem NewItemFun return nil, err } - v, err, _ := c.sf.Do(key, func() (interface{}, error) { + v, err, _ := c.sf.Do(key, func() (any, error) { if item, ok := c.cache.Get(key); ok { c.metrics.hitsTotal.Inc() return item, nil diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/labelpb/label.go b/vendor/github.com/thanos-io/thanos/pkg/store/labelpb/label.go index 0de1512acf0..38bcc63421f 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/labelpb/label.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/labelpb/label.go @@ -12,8 +12,10 @@ import ( "io" "sort" "strings" + "sync" "unsafe" + "github.com/VictoriaMetrics/easyproto" "github.com/cespare/xxhash/v2" "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" @@ -28,12 +30,12 @@ var ( sep = []byte{'\xff'} ) -func safeBytes(buf string) []byte { - return []byte(buf) +func noAllocString(buf []byte) string { + return *(*string)(unsafe.Pointer(&buf)) } -func safeString(buf []byte) string { - return string(buf) +func noAllocBytes(buf string) []byte { + return *(*[]byte)(unsafe.Pointer(&buf)) } // ZLabelsFromPromLabels converts Prometheus labels to slice of labelpb.ZLabel in type unsafe manner. @@ -65,8 +67,8 @@ func ReAllocZLabelsStrings(lset *[]ZLabel, intern bool) { } for j, l := range *lset { - (*lset)[j].Name = string(safeBytes(l.Name)) - (*lset)[j].Value = string(safeBytes(l.Value)) + (*lset)[j].Name = string(noAllocBytes(l.Name)) + (*lset)[j].Value = string(noAllocBytes(l.Value)) } } @@ -80,7 +82,7 @@ func internLabelString(s string) string { // detachAndInternLabelString reallocates the label string to detach it // from a bigger memory pool and interns the string. func detachAndInternLabelString(s string) string { - return internLabelString(string(safeBytes(s))) + return internLabelString(string(noAllocBytes(s))) } // ZLabelSetsToPromLabelSets converts slice of labelpb.ZLabelSet to slice of Prometheus labels. 
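The label.go hunk above swaps the copying helpers for noAllocString/noAllocBytes, which reinterpret memory instead of allocating. Below is a minimal standalone sketch of that zero-copy cast pattern (a hypothetical main package for illustration, not part of the vendored code); the trade-off is that the resulting string aliases the byte slice, so the bytes must not be mutated while the string is in use:

package main

import (
	"fmt"
	"unsafe"
)

// noAllocString reinterprets buf as a string without copying.
// This is only safe if buf is never written to afterwards.
func noAllocString(buf []byte) string {
	return *(*string)(unsafe.Pointer(&buf))
}

func main() {
	raw := []byte(`{job="node"}`)
	s := noAllocString(raw)
	fmt.Println(s) // {job="node"}

	// Mutating raw is visible through s: the usual string
	// immutability guarantee no longer holds.
	raw[1] = 'J'
	fmt.Println(s) // {Job="node"}
}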
@@ -191,7 +193,7 @@ func (m *ZLabel) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = safeString(data[iNdEx:postIndex]) + m.Name = noAllocString(data[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -223,7 +225,7 @@ func (m *ZLabel) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = safeString(data[iNdEx:postIndex]) + m.Value = noAllocString(data[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -333,8 +335,8 @@ func (m *ZLabelSet) PromLabels() labels.Labels { func DeepCopy(lbls []ZLabel) []ZLabel { ret := make([]ZLabel, len(lbls)) for i := range lbls { - ret[i].Name = string(safeBytes(lbls[i].Name)) - ret[i].Value = string(safeBytes(lbls[i].Value)) + ret[i].Name = string(noAllocBytes(lbls[i].Name)) + ret[i].Value = string(noAllocBytes(lbls[i].Value)) } return ret } @@ -425,3 +427,67 @@ func (z ZLabelSets) Less(i, j int) bool { return l == lenI } + +type CustomLabelset labels.Labels + +var builderPool = &sync.Pool{ + New: func() any { + b := labels.NewScratchBuilder(8) + return &b + }, +} + +func (l *CustomLabelset) UnmarshalProtobuf(src []byte) (err error) { + b := builderPool.Get().(*labels.ScratchBuilder) + b.Reset() + + defer builderPool.Put(b) + + var fc easyproto.FieldContext + + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return errors.Wrap(err, "unmarshal next field") + } + + if fc.FieldNum != 1 { + return fmt.Errorf("expected field 1, got %d", fc.FieldNum) + } + + dat, ok := fc.MessageData() + if !ok { + return fmt.Errorf("expected message data for field %d", fc.FieldNum) + } + + var n, v string + var msgFc easyproto.FieldContext + for len(dat) > 0 { + dat, err = msgFc.NextField(dat) + if err != nil { + return errors.Wrap(err, "unmarshal next field in message") + } + + switch msgFc.FieldNum { + case 1: + n, ok = msgFc.String() + if !ok { + return fmt.Errorf("expected string data for field %d", msgFc.FieldNum) + } + case 2: + v, ok = msgFc.String() + if !ok { + return fmt.Errorf("expected string data for field %d", msgFc.FieldNum) + } + default: + return fmt.Errorf("unexpected field %d in label message", msgFc.FieldNum) + } + } + + b.Add(n, v) + + } + + *l = CustomLabelset(b.Labels()) + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/postings.go b/vendor/github.com/thanos-io/thanos/pkg/store/postings.go index e44dfbe66cb..adfb7e80f32 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/postings.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/postings.go @@ -116,11 +116,7 @@ func (r *postingsReaderBuilder) Next() bool { r.keyID = r.postings[r.pi].keyID r.pi++ - for { - if r.pi >= len(r.postings) { - break - } - + for r.pi < len(r.postings) { if r.postings[r.pi].ptr.Start == r.postings[r.pi-1].ptr.Start && r.postings[r.pi].ptr.End == r.postings[r.pi-1].ptr.End { r.repeatFor++ diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go index 3c30d70e8e8..1b2a744a65c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go @@ -12,6 +12,7 @@ import ( "net/http" "net/url" "path" + "slices" "sort" "strings" "sync" @@ -91,7 +92,7 @@ func NewPrometheusStore( promVersion: promVersion, timestamps: timestamps, remoteReadAcceptableResponses: []prompb.ReadRequest_ResponseType{prompb.ReadRequest_STREAMED_XOR_CHUNKS, prompb.ReadRequest_SAMPLES}, - buffers: sync.Pool{New: 
func() interface{} { + buffers: sync.Pool{New: func() any { b := make([]byte, 0, initialBufSize) return &b }}, @@ -426,10 +427,7 @@ func (p *PrometheusStore) chunkSamples(series *prompb.TimeSeries, maxSamplesPerC defer hashPool.Put(hasher) for len(samples) > 0 { - chunkSize := len(samples) - if chunkSize > maxSamplesPerChunk { - chunkSize = maxSamplesPerChunk - } + chunkSize := min(len(samples), maxSamplesPerChunk) enc, cb, err := p.encodeChunk(samples[:chunkSize]) if err != nil { @@ -598,10 +596,8 @@ func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValue if r.Label == "" { return nil, status.Error(codes.InvalidArgument, "label name parameter cannot be empty") } - for i := range r.WithoutReplicaLabels { - if r.Label == r.WithoutReplicaLabels[i] { - return &storepb.LabelValuesResponse{}, nil - } + if slices.Contains(r.WithoutReplicaLabels, r.Label) { + return &storepb.LabelValuesResponse{}, nil } extLset := p.externalLabelsFn() diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go index 91b1aa31e92..02d03e14b0d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go @@ -176,7 +176,7 @@ func NewProxyStore( stores: stores, component: component, selectorLabels: selectorLabels, - buffers: sync.Pool{New: func() interface{} { + buffers: sync.Pool{New: func() any { b := make([]byte, 0, initialBufSize) return &b }}, @@ -328,7 +328,6 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb. storeResponses := make([]respSet, 0, len(stores)) for _, st := range stores { - st := st respSet, err := newAsyncRespSet(ctx, st, r, s.responseTimeout, s.retrievalStrategy, &s.buffers, r.ShardInfo, reqLogger, s.metrics.emptyStreamResponses, s.lazyRetrievalMaxBufferedResponses) if err != nil { @@ -427,7 +426,6 @@ func (s *ProxyStore) LabelNames(ctx context.Context, originalRequest *storepb.La g, gctx = errgroup.WithContext(ctx) ) for _, st := range stores { - st := st storeID, storeAddr, isLocalStore := storeInfo(st) g.Go(func() error { @@ -531,7 +529,6 @@ func (s *ProxyStore) LabelValues(ctx context.Context, originalRequest *storepb.L g, gctx = errgroup.WithContext(ctx) ) for _, st := range stores { - st := st storeID, storeAddr, isLocalStore := storeInfo(st) g.Go(func() error { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go index 98fd309c2fa..16134fef3ff 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go @@ -169,7 +169,7 @@ func (d *responseDeduplicator) At() *storepb.SeriesResponse { // NewProxyResponseLoserTree returns heap that k-way merge series together. // It's agnostic to duplicates and overlaps, it forwards all duplicated series in random order. func NewProxyResponseLoserTree(seriesSets ...respSet) *losertree.Tree[*storepb.SeriesResponse, respSet] { - var maxVal *storepb.SeriesResponse = storepb.NewSeriesResponse(nil) + var maxVal = storepb.NewSeriesResponse(nil) less := func(a, b *storepb.SeriesResponse) bool { if a == maxVal && b != maxVal { @@ -744,10 +744,7 @@ func newEagerRespSet( defer t.Stop() } - for { - if !handleRecvResponse(t) { - break - } + for handleRecvResponse(t) { } // This should be used only for stores that does not support doing this on server side. 
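Several hunks above (nextBatch in bucket.go, cachedGetRange in caching_bucket.go, chunkSamples in prometheus.go) replace hand-rolled clamps with Go 1.21's built-in min. A small before/after sketch showing the equivalence, using an invented sample count rather than the vendored data:

package main

import "fmt"

func main() {
	samples := make([]float64, 250)
	const maxSamplesPerChunk = 120

	// Before: the clamp spelled out by hand.
	chunkSize := len(samples)
	if chunkSize > maxSamplesPerChunk {
		chunkSize = maxSamplesPerChunk
	}

	// After: the built-in states the intent directly.
	if chunkSize != min(len(samples), maxSamplesPerChunk) {
		panic("unreachable: the two forms are equivalent")
	}
	fmt.Println(chunkSize) // 120
}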
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go index 6f56d42a84f..343b00edc66 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go @@ -128,7 +128,7 @@ func HistogramProtoToFloatHistogram(hp Histogram) *histogram.FloatHistogram { func spansProtoToSpans(s []BucketSpan) []histogram.Span { spans := make([]histogram.Span, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} } @@ -183,7 +183,7 @@ func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogra func spansToSpansProto(s []histogram.Span) []BucketSpan { spans := make([]BucketSpan, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go index d1d88cb92c0..30a985019c8 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go @@ -8,6 +8,7 @@ import ( "hash" "io" "math" + "slices" "sort" "strings" "sync" @@ -116,7 +117,7 @@ func NewTSDBStore( maxBytesPerFrame: RemoteReadFrameLimit, storeFilter: filter.AllowAllStoreFilter{}, close: func() {}, - buffers: sync.Pool{New: func() interface{} { + buffers: sync.Pool{New: func() any { b := make([]byte, 0, initialBufSize) return &b }}, @@ -435,10 +436,8 @@ func (s *TSDBStore) LabelValues(ctx context.Context, r *storepb.LabelValuesReque return nil, status.Error(codes.InvalidArgument, "label name parameter cannot be empty") } - for i := range r.WithoutReplicaLabels { - if r.Label == r.WithoutReplicaLabels[i] { - return &storepb.LabelValuesResponse{}, nil - } + if slices.Contains(r.WithoutReplicaLabels, r.Label) { + return &storepb.LabelValuesResponse{}, nil } match, matchers, err := matchesExternalLabels(r.Matchers, s.getExtLset(), s.matcherCache) diff --git a/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go b/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go index a84f1ca6733..06e6d6fc7df 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go +++ b/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go @@ -38,10 +38,7 @@ func mergeTwoStringSlices(limit int, a, b []string) []string { a = truncateToLimit(limit, a) b = truncateToLimit(limit, b) - maxl := len(a) - if len(b) > len(a) { - maxl = len(b) - } + maxl := max(len(b), len(a)) res := make([]string, 0, maxl*10/9) diff --git a/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/prometheus.go b/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/prometheus.go index 46a5136005f..ca35ce45ab8 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/prometheus.go +++ b/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/prometheus.go @@ -143,7 +143,7 @@ func ForeachPrometheus(t *testing.T, testFn func(t testing.TB, p *Prometheus)) { paths = PrometheusBinary() } - for _, path := range strings.Split(paths, " ") { + for path := range strings.SplitSeq(paths, " ") { if ok := t.Run(path, func(t *testing.T) { p, err := newPrometheus(path, "") testutil.Ok(t, err) @@ -521,6 +521,9 @@ func createBlockWithDelay(ctx context.Context, dir string, series []labels.Label logger := log.NewNopLogger() m.ULID = id m.Compaction.Sources = []ulid.ULID{id} + if 
blockDelay > 0 { + m.Thanos.UploadTime = timestamp.Time(int64(blockID.Time())).Add(-blockDelay) + } if err := m.WriteToDir(logger, path.Join(dir, blockID.String())); err != nil { return ulid.ULID{}, errors.Wrap(err, "write meta.json file") } @@ -576,7 +579,7 @@ func createBlock( g.Go(func() error { t := mint - for i := 0; i < numSamples; i++ { + for range numSamples { app := h.Appender(ctx) for _, lset := range batch { @@ -584,15 +587,17 @@ func createBlock( var sampleType = sampleTypes[si.Add(1)%int64(len(sampleTypes))] - if sampleType == chunkenc.ValFloat { + switch sampleType { + case chunkenc.ValFloat: randMutex.Lock() _, err = app.Append(0, lset, t, r.Float64()) randMutex.Unlock() - } else if sampleType == chunkenc.ValHistogram { + case chunkenc.ValHistogram: _, err = app.AppendHistogram(0, lset, t, &histogramSample, nil) - } else if sampleType == chunkenc.ValFloatHistogram { + case chunkenc.ValFloatHistogram: _, err = app.AppendHistogram(0, lset, t, nil, &floatHistogramSample) } + if err != nil { if rerr := app.Rollback(); rerr != nil { err = errors.Wrapf(err, "rollback failed: %v", rerr) @@ -750,11 +755,11 @@ func CreateBlockWithChurn( }() app := h.Appender(ctx) - for i := 0; i < len(series); i++ { + for i := range series { var ref storage.SeriesRef start := RandRange(rnd, mint, maxt) - for j := 0; j < numSamples; j++ { + for j := range numSamples { if ref == 0 { ref, err = app.Append(0, series[i], start, float64(i+j)) } else { diff --git a/vendor/github.com/thanos-io/thanos/pkg/tracing/grpc.go b/vendor/github.com/thanos-io/thanos/pkg/tracing/grpc.go index 93ec18cd18a..bda0501b6bd 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/tracing/grpc.go +++ b/vendor/github.com/thanos-io/thanos/pkg/tracing/grpc.go @@ -25,7 +25,7 @@ func StreamClientInterceptor(tracer opentracing.Tracer) grpc.StreamClientInterce // UnaryServerInterceptor returns a new unary server interceptor for OpenTracing and injects given tracer. func UnaryServerInterceptor(tracer opentracing.Tracer) grpc.UnaryServerInterceptor { interceptor := grpc_opentracing.UnaryServerInterceptor(grpc_opentracing.WithTracer(tracer)) - return func(parentCtx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return func(parentCtx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { // Add our own tracer. return interceptor(ContextWithTracer(parentCtx, tracer), req, info, handler) } @@ -34,7 +34,7 @@ func UnaryServerInterceptor(tracer opentracing.Tracer) grpc.UnaryServerIntercept // StreamServerInterceptor returns a new streaming server interceptor for OpenTracing and injects given tracer. func StreamServerInterceptor(tracer opentracing.Tracer) grpc.StreamServerInterceptor { interceptor := grpc_opentracing.StreamServerInterceptor(grpc_opentracing.WithTracer(tracer)) - return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return func(srv any, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { // Add our own tracer. 
wrappedStream := grpc_middleware.WrapServerStream(stream) wrappedStream.WrappedContext = ContextWithTracer(stream.Context(), tracer) diff --git a/vendor/github.com/thanos-io/thanos/pkg/tracing/interceptors/client.go b/vendor/github.com/thanos-io/thanos/pkg/tracing/interceptors/client.go index a45ec8fcb53..bb0b64e566b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/tracing/interceptors/client.go +++ b/vendor/github.com/thanos-io/thanos/pkg/tracing/interceptors/client.go @@ -19,7 +19,7 @@ import ( // UnaryClientInterceptor is a gRPC client-side interceptor that provides reporting for Unary RPCs. func UnaryClientInterceptor(reportable ClientReportable) grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { r := newReport(Unary, method) reporter, newCtx := reportable.ClientReporter(ctx, req, r.rpcType, r.service, r.method) @@ -64,14 +64,14 @@ type monitoredClientStream struct { reporter Reporter } -func (s *monitoredClientStream) SendMsg(m interface{}) error { +func (s *monitoredClientStream) SendMsg(m any) error { start := time.Now() err := s.ClientStream.SendMsg(m) s.reporter.PostMsgSend(m, err, time.Since(start)) return err } -func (s *monitoredClientStream) RecvMsg(m interface{}) error { +func (s *monitoredClientStream) RecvMsg(m any) error { start := time.Now() err := s.ClientStream.RecvMsg(m) s.reporter.PostMsgReceive(m, err, time.Since(start)) diff --git a/vendor/github.com/thanos-io/thanos/pkg/tracing/interceptors/reporter.go b/vendor/github.com/thanos-io/thanos/pkg/tracing/interceptors/reporter.go index 4ccdb0b61e6..1d91882711b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/tracing/interceptors/reporter.go +++ b/vendor/github.com/thanos-io/thanos/pkg/tracing/interceptors/reporter.go @@ -48,27 +48,27 @@ func FullMethod(service, method string) string { } type ClientReportable interface { - ClientReporter(ctx context.Context, reqProtoOrNil interface{}, typ GRPCType, service string, method string) (Reporter, context.Context) + ClientReporter(ctx context.Context, reqProtoOrNil any, typ GRPCType, service string, method string) (Reporter, context.Context) } type ServerReportable interface { - ServerReporter(ctx context.Context, reqProtoOrNil interface{}, typ GRPCType, service string, method string) (Reporter, context.Context) + ServerReporter(ctx context.Context, reqProtoOrNil any, typ GRPCType, service string, method string) (Reporter, context.Context) } type Reporter interface { PostCall(err error, rpcDuration time.Duration) - PostMsgSend(reqProto interface{}, err error, sendDuration time.Duration) - PostMsgReceive(replyProto interface{}, err error, recvDuration time.Duration) + PostMsgSend(reqProto any, err error, sendDuration time.Duration) + PostMsgReceive(replyProto any, err error, recvDuration time.Duration) } var _ Reporter = NoopReporter{} type NoopReporter struct{} -func (NoopReporter) PostCall(error, time.Duration) {} -func (NoopReporter) PostMsgSend(interface{}, error, time.Duration) {} -func (NoopReporter) PostMsgReceive(interface{}, error, time.Duration) {} +func (NoopReporter) PostCall(error, time.Duration) {} +func (NoopReporter) PostMsgSend(any, error, time.Duration) {} +func (NoopReporter) PostMsgReceive(any, error, time.Duration) {} type report struct { rpcType GRPCType diff --git 
a/vendor/github.com/thanos-io/thanos/pkg/tracing/interceptors/server.go b/vendor/github.com/thanos-io/thanos/pkg/tracing/interceptors/server.go index 7bfed7bae5a..4eb5a0a80f4 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/tracing/interceptors/server.go +++ b/vendor/github.com/thanos-io/thanos/pkg/tracing/interceptors/server.go @@ -19,7 +19,7 @@ import ( // UnaryServerInterceptor is a gRPC server-side interceptor that provides reporting for Unary RPCs. func UnaryServerInterceptor(reportable ServerReportable) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { r := newReport(Unary, info.FullMethod) reporter, newCtx := reportable.ServerReporter(ctx, req, r.rpcType, r.service, r.method) @@ -34,7 +34,7 @@ func UnaryServerInterceptor(reportable ServerReportable) grpc.UnaryServerInterce // StreamServerInterceptor is a gRPC server-side interceptor that provides reporting for Streaming RPCs. func StreamServerInterceptor(reportable ServerReportable) grpc.StreamServerInterceptor { - return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { r := newReport(ServerStream, info.FullMethod) reporter, newCtx := reportable.ServerReporter(ss.Context(), nil, streamRPCType(info), r.service, r.method) err := handler(srv, &monitoredServerStream{ServerStream: ss, newCtx: newCtx, reporter: reporter}) @@ -64,14 +64,14 @@ func (s *monitoredServerStream) Context() context.Context { return s.newCtx } -func (s *monitoredServerStream) SendMsg(m interface{}) error { +func (s *monitoredServerStream) SendMsg(m any) error { start := time.Now() err := s.ServerStream.SendMsg(m) s.reporter.PostMsgSend(m, err, time.Since(start)) return err } -func (s *monitoredServerStream) RecvMsg(m interface{}) error { +func (s *monitoredServerStream) RecvMsg(m any) error { start := time.Now() err := s.ServerStream.RecvMsg(m) s.reporter.PostMsgReceive(m, err, time.Since(start)) diff --git a/vendor/github.com/thanos-io/thanos/pkg/tracing/migration/bridge.go b/vendor/github.com/thanos-io/thanos/pkg/tracing/migration/bridge.go index 626f004c62b..5dba4c1fe48 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/tracing/migration/bridge.go +++ b/vendor/github.com/thanos-io/thanos/pkg/tracing/migration/bridge.go @@ -89,7 +89,7 @@ func (b *bridgeTracerWrapper) StartSpan(operationName string, opts ...opentracin return b.bt.StartSpan(operationName, opts...) 
} -func (b *bridgeTracerWrapper) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error { +func (b *bridgeTracerWrapper) Inject(sm opentracing.SpanContext, format any, carrier any) error { otCarrier := opentracing.HTTPHeadersCarrier{} err := b.bt.Inject(sm, format, otCarrier) if err != nil { @@ -109,7 +109,7 @@ func (b *bridgeTracerWrapper) Inject(sm opentracing.SpanContext, format interfac return b.bt.Inject(sm, format, carrier) } -func (b *bridgeTracerWrapper) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { +func (b *bridgeTracerWrapper) Extract(format any, carrier any) (opentracing.SpanContext, error) { if tmr, ok := carrier.(opentracing.TextMapReader); ok { otCarrier := opentracing.HTTPHeadersCarrier{} err := tmr.ForeachKey(func(key, val string) error { diff --git a/vendor/github.com/thanos-io/thanos/pkg/tracing/tracing_middleware/client.go b/vendor/github.com/thanos-io/thanos/pkg/tracing/tracing_middleware/client.go index 77167c957f6..4102be6b71b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/tracing/tracing_middleware/client.go +++ b/vendor/github.com/thanos-io/thanos/pkg/tracing/tracing_middleware/client.go @@ -40,16 +40,16 @@ func (o *opentracingClientReporter) PostCall(err error, _ time.Duration) { o.clientSpan.Finish() } -func (o *opentracingClientReporter) PostMsgSend(interface{}, error, time.Duration) {} +func (o *opentracingClientReporter) PostMsgSend(any, error, time.Duration) {} -func (o *opentracingClientReporter) PostMsgReceive(interface{}, error, time.Duration) {} +func (o *opentracingClientReporter) PostMsgReceive(any, error, time.Duration) {} type opentracingClientReportable struct { tracer opentracing.Tracer filterOutFunc FilterFunc } -func (o *opentracingClientReportable) ClientReporter(ctx context.Context, _ interface{}, typ interceptors.GRPCType, service string, method string) (interceptors.Reporter, context.Context) { +func (o *opentracingClientReportable) ClientReporter(ctx context.Context, _ any, typ interceptors.GRPCType, service string, method string) (interceptors.Reporter, context.Context) { if o.filterOutFunc != nil && !o.filterOutFunc(ctx, method) { return interceptors.NoopReporter{}, ctx } diff --git a/vendor/github.com/thanos-io/thanos/pkg/tracing/tracing_middleware/server.go b/vendor/github.com/thanos-io/thanos/pkg/tracing/tracing_middleware/server.go index bf5019248b6..2b313c0bf5b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/tracing/tracing_middleware/server.go +++ b/vendor/github.com/thanos-io/thanos/pkg/tracing/tracing_middleware/server.go @@ -43,9 +43,9 @@ func (o *opentracingServerReporter) PostCall(err error, _ time.Duration) { o.serverSpan.Finish() } -func (o *opentracingServerReporter) PostMsgSend(interface{}, error, time.Duration) {} +func (o *opentracingServerReporter) PostMsgSend(any, error, time.Duration) {} -func (o *opentracingServerReporter) PostMsgReceive(interface{}, error, time.Duration) {} +func (o *opentracingServerReporter) PostMsgReceive(any, error, time.Duration) {} type opentracingServerReportable struct { tracer opentracing.Tracer @@ -54,7 +54,7 @@ type opentracingServerReportable struct { filterOutFunc FilterFunc } -func (o *opentracingServerReportable) ServerReporter(ctx context.Context, _ interface{}, typ interceptors.GRPCType, service string, method string) (interceptors.Reporter, context.Context) { +func (o *opentracingServerReportable) ServerReporter(ctx context.Context, _ any, typ interceptors.GRPCType, service string, method string) 
(interceptors.Reporter, context.Context) { if o.filterOutFunc != nil && !o.filterOutFunc(ctx, interceptors.FullMethod(service, method)) { return interceptors.NoopReporter{}, ctx } diff --git a/vendor/modules.txt b/vendor/modules.txt index 292b3561ab9..bb693e1a923 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -132,6 +132,9 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapp # github.com/Masterminds/squirrel v1.5.4 ## explicit; go 1.14 github.com/Masterminds/squirrel +# github.com/VictoriaMetrics/easyproto v0.1.4 +## explicit; go 1.18 +github.com/VictoriaMetrics/easyproto # github.com/VictoriaMetrics/fastcache v1.12.2 ## explicit; go 1.13 github.com/VictoriaMetrics/fastcache @@ -1192,7 +1195,7 @@ github.com/stretchr/testify/assert github.com/stretchr/testify/assert/yaml github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/thanos-io/objstore v0.0.0-20250722142242-922b22272ee3 +# github.com/thanos-io/objstore v0.0.0-20250804093838-71d60dfee488 ## explicit; go 1.24.0 github.com/thanos-io/objstore github.com/thanos-io/objstore/exthttp @@ -1226,8 +1229,8 @@ github.com/thanos-io/promql-engine/query github.com/thanos-io/promql-engine/ringbuffer github.com/thanos-io/promql-engine/storage github.com/thanos-io/promql-engine/storage/prometheus -# github.com/thanos-io/thanos v0.39.3-0.20250729120336-88d0ae8071cb -## explicit; go 1.24.0 +# github.com/thanos-io/thanos v0.40.1 +## explicit; go 1.25.0 github.com/thanos-io/thanos/pkg/api/query/querypb github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader @@ -1268,6 +1271,7 @@ github.com/thanos-io/thanos/pkg/rules/rulespb github.com/thanos-io/thanos/pkg/runutil github.com/thanos-io/thanos/pkg/server/http/middleware github.com/thanos-io/thanos/pkg/shipper +github.com/thanos-io/thanos/pkg/status/statuspb github.com/thanos-io/thanos/pkg/store github.com/thanos-io/thanos/pkg/store/cache github.com/thanos-io/thanos/pkg/store/cache/cachekey