Skip to content

Commit ee4c041

Browse files
committed
Increase min read size
1 parent 6a5cf7a commit ee4c041

File tree

3 files changed

+22
-1
lines changed

3 files changed

+22
-1
lines changed

docs/sources/shared/configuration.md

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4316,6 +4316,16 @@ discover_generic_fields:
43164316
# CLI flag: -querier.query-timeout
43174317
[query_timeout: <duration> | default = 1m]
43184318

4319+
# Maximum bytes per second for bucket GetObject operations during a query. 0
4320+
# means unlimited. Also expressible in human readable forms (1MB, 256KB, etc).
4321+
# CLI flag: -querier.query-bucket-get-object-rate-limit
4322+
[query_bucket_get_object_rate_limit: <int> | default = 0B]
4323+
4324+
# Maximum burst bytes for bucket GetObject operations during a query. 0 means
4325+
# unlimited. Also expressible in human readable forms (1MB, 256KB, etc).
4326+
# CLI flag: -querier.query-bucket-get-object-rate-limit-burst
4327+
[query_bucket_get_object_rate_limit_burst: <int> | default = 0B]
4328+
43194329
# Split queries by a time interval and execute in parallel. The value 0 disables
43204330
# splitting by time. This also determines how cache keys are chosen when result
43214331
# caching is enabled.

pkg/querier/testutil/limits.go

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -99,3 +99,11 @@ func (m *MockLimits) DebugEngineStreams(_ string) bool {
9999
func (m *MockLimits) DebugEngineTasks(_ string) bool {
100100
return false
101101
}
102+
103+
func (m *MockLimits) QueryBucketGetObjectRateLimit(_ context.Context, _ string) int64 {
104+
return 0 // 0 means unlimited
105+
}
106+
107+
func (m *MockLimits) QueryBucketGetObjectRateLimitBurst(_ context.Context, _ string) int64 {
108+
return 0 // 0 means unlimited
109+
}

pkg/storage/bucket/rate_limited_reader.go

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,10 @@ import (
1616

1717
// minReadSize is the minimum chunk size for reading data.
1818
// This ensures we read in reasonable-sized batches rather than very small ones.
19-
const minReadSize = 512
19+
// For typical 1-2MB objects (most of our chunks), 64KB provides a good balance between efficiency
20+
// and rate limiting responsiveness.
21+
// E.g. for a 2MB object, a 64KB read size yields 32 reads, which is reasonable.
22+
const minReadSize = 64 * 1024 // 64KB
2023

2124
type rateLimiterKey struct{}
2225

0 commit comments

Comments (0)