diff --git a/CHANGELOG.md b/CHANGELOG.md index dc8f898f6..e2e4aa846 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +## [Unreleased] + +* [ENHANCEMENT] Add PostgreSQL 18 support: + * Add parallel worker activity metrics (`pg_stat_database_parallel_workers_to_launch`, `pg_stat_database_parallel_workers_launched`) + * Add vacuum/analyze timing metrics (`pg_stat_user_tables_total_vacuum_time`, `pg_stat_user_tables_total_autovacuum_time`, `pg_stat_user_tables_total_analyze_time`, `pg_stat_user_tables_total_autoanalyze_time`) + * Add enhanced checkpointer metrics (`pg_stat_bgwriter_checkpoints_done_total`, `pg_stat_bgwriter_slru_written_total`) + * Add `pg_stat_io` collector with byte statistics and WAL I/O activity tracking + * Add `pg_backend_stats` collector for per-backend I/O and WAL statistics + * Add enhanced `pg_backend_memory_contexts` collector with type and path columns +* [ENHANCEMENT] Update CI tested PostgreSQL versions to include PostgreSQL 18 + ## 0.15.0 / 2023-10-27 * [ENHANCEMENT] Add 1kB and 2kB units #915 diff --git a/README.md b/README.md index 429058e6d..b22e5f798 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ Prometheus exporter for PostgreSQL server metrics. -CI Tested PostgreSQL versions: `11`, `12`, `13`, `14`, `15`, `16` +CI Tested PostgreSQL versions: `11`, `12`, `13`, `14`, `15`, `16`, `18` ## Quick Start This package is available for Docker: diff --git a/collector/pg_backend_memory_contexts.go b/collector/pg_backend_memory_contexts.go new file mode 100644 index 000000000..2146d69ba --- /dev/null +++ b/collector/pg_backend_memory_contexts.go @@ -0,0 +1,216 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package collector
+
+import (
+	"context"
+	"database/sql"
+	"strconv"
+	"github.com/blang/semver/v4"
+	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const backendMemoryContextsSubsystem = "backend_memory_contexts"
+
+func init() {
+	registerCollector(backendMemoryContextsSubsystem, defaultDisabled, NewPGBackendMemoryContextsCollector)
+}
+
+type PGBackendMemoryContextsCollector struct {
+	log log.Logger
+}
+
+func NewPGBackendMemoryContextsCollector(config collectorConfig) (Collector, error) {
+	return &PGBackendMemoryContextsCollector{log: config.logger}, nil
+}
+
+var (
+	backendMemoryContextsTotalBytes = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, backendMemoryContextsSubsystem, "total_bytes"),
+		"Total bytes allocated for memory context",
+		[]string{"pid", "name", "ident", "parent", "level", "type", "path"},
+		prometheus.Labels{},
+	)
+	backendMemoryContextsUsedBytes = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, backendMemoryContextsSubsystem, "used_bytes"),
+		"Used bytes in memory context",
+		[]string{"pid", "name", "ident", "parent", "level", "type", "path"},
+		prometheus.Labels{},
+	)
+	backendMemoryContextsFreeBytes = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, backendMemoryContextsSubsystem, "free_bytes"),
+		"Free bytes in memory context",
+		[]string{"pid", "name", "ident", "parent", "level", "type", "path"},
+		prometheus.Labels{},
+	)
+	backendMemoryContextsFreeChunks = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, backendMemoryContextsSubsystem, "free_chunks"),
+		"Number of free chunks in memory context",
+		[]string{"pid", "name", "ident", "parent", "level", "type", "path"},
+		prometheus.Labels{},
+	)
+
+	// PostgreSQL 18+ query with type and path columns.
+	// Note: pg_backend_memory_contexts has no pid column (it reports the
+	// current session only), so expose pg_backend_pid() as the pid label.
+	backendMemoryContextsQuery18Plus = `
+	SELECT
+		pg_backend_pid() AS pid,
+		name,
+		COALESCE(ident, '') as ident,
+		COALESCE(parent, '') as parent,
+		level,
+		total_bytes,
+		total_nblocks,
+		free_bytes,
+		free_chunks,
+		used_bytes,
+		type,
+		path
+	FROM pg_backend_memory_contexts
+	ORDER BY pid, name
+	`
+
+	// Pre-PostgreSQL 18 query without type and path columns
+	backendMemoryContextsQueryPre18 = `
+	SELECT
+		pg_backend_pid() AS pid,
+		name,
+		COALESCE(ident, '') as ident,
+		COALESCE(parent, '') as parent,
+		level,
+		total_bytes,
+		total_nblocks,
+		free_bytes,
+		free_chunks,
+		used_bytes,
+		'' as type,
+		'' as path
+	FROM pg_backend_memory_contexts
+	ORDER BY pid, name
+	`
+)
+
+func (c *PGBackendMemoryContextsCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error {
+	// pg_backend_memory_contexts was introduced in PostgreSQL 14
+	if instance.version.LT(semver.Version{Major: 14}) {
+		return nil
+	}
+
+	db := instance.getDB()
+
+	// Use version-specific query for PostgreSQL 18+
+	query := backendMemoryContextsQueryPre18
+	if instance.version.GTE(semver.Version{Major: 18}) {
+		query = backendMemoryContextsQuery18Plus
+	}
+
+	rows, err := db.QueryContext(ctx, query)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		var pid, name, ident, parent, contextType, path sql.NullString
+		var level, totalNblocks, freeChunks sql.NullInt64
+		var totalBytes, freeBytes, usedBytes sql.NullFloat64
+
+		err := rows.Scan(
+			&pid,
+			&name,
+			&ident,
+			&parent,
+			&level,
+			&totalBytes,
+			&totalNblocks,
+			&freeBytes,
+			&freeChunks,
+			&usedBytes,
+			&contextType,
+			&path,
+		)
+		if err != nil {
+			return err
+		}
+
+		pidLabel := "unknown"
+		if pid.Valid {
+			pidLabel = pid.String
+		}
+		nameLabel := "unknown"
+		if name.Valid {
+			nameLabel = name.String
+		}
+		identLabel := ""
+		if ident.Valid {
+			identLabel = ident.String
+		}
+		parentLabel := ""
+		if parent.Valid {
+			parentLabel = parent.String
+		}
+		levelLabel := "0"
+		if level.Valid {
+			levelLabel = strconv.FormatInt(level.Int64, 10)
+		}
+		typeLabel := ""
+		if contextType.Valid {
+			typeLabel = contextType.String
+		}
+		pathLabel := ""
+		if path.Valid {
+			pathLabel = path.String
+		}
+
+		labels := []string{pidLabel, nameLabel, identLabel, parentLabel, levelLabel, typeLabel, pathLabel}
+
+		if totalBytes.Valid {
+			ch <- prometheus.MustNewConstMetric(
+				backendMemoryContextsTotalBytes,
+				prometheus.GaugeValue,
+				totalBytes.Float64,
+				labels...,
+			)
+		}
+
+		if usedBytes.Valid {
+			ch <- prometheus.MustNewConstMetric(
+				backendMemoryContextsUsedBytes,
+				prometheus.GaugeValue,
+				usedBytes.Float64,
+				labels...,
+			)
+		}
+
+		if freeBytes.Valid {
+			ch <- prometheus.MustNewConstMetric(
+				backendMemoryContextsFreeBytes,
+				prometheus.GaugeValue,
+				freeBytes.Float64,
+				labels...,
+			)
+		}
+
+		if freeChunks.Valid {
+			ch <- prometheus.MustNewConstMetric(
+				backendMemoryContextsFreeChunks,
+				prometheus.GaugeValue,
+				float64(freeChunks.Int64),
+				labels...,
+			)
+		}
+	}
+
+	return nil
+}
diff --git a/collector/pg_backend_stats.go b/collector/pg_backend_stats.go
new file mode 100644
index 000000000..e28aa70ca
--- /dev/null
+++ b/collector/pg_backend_stats.go
@@ -0,0 +1,310 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package collector + +import ( + "context" + "database/sql" + + "github.com/blang/semver/v4" + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" +) + +const backendStatsSubsystem = "backend_stats" + +func init() { + registerCollector(backendStatsSubsystem, defaultDisabled, NewPGBackendStatsCollector) +} + +type PGBackendStatsCollector struct { + log log.Logger +} + +func NewPGBackendStatsCollector(config collectorConfig) (Collector, error) { + return &PGBackendStatsCollector{log: config.logger}, nil +} + +var ( + // Backend I/O statistics metrics (PostgreSQL 18+) + backendIOReads = prometheus.NewDesc( + prometheus.BuildFQName(namespace, backendStatsSubsystem, "io_reads_total"), + "Number of reads by backend (PostgreSQL 18+)", + []string{"pid", "backend_type", "io_context", "io_object"}, + prometheus.Labels{}, + ) + backendIOWrites = prometheus.NewDesc( + prometheus.BuildFQName(namespace, backendStatsSubsystem, "io_writes_total"), + "Number of writes by backend (PostgreSQL 18+)", + []string{"pid", "backend_type", "io_context", "io_object"}, + prometheus.Labels{}, + ) + backendIOExtends = prometheus.NewDesc( + prometheus.BuildFQName(namespace, backendStatsSubsystem, "io_extends_total"), + "Number of extends by backend (PostgreSQL 18+)", + []string{"pid", "backend_type", "io_context", "io_object"}, + prometheus.Labels{}, + ) + backendIOReadBytes = prometheus.NewDesc( + prometheus.BuildFQName(namespace, backendStatsSubsystem, "io_read_bytes_total"), + "Number of bytes read by backend (PostgreSQL 18+)", + []string{"pid", "backend_type", "io_context", "io_object"}, + prometheus.Labels{}, + ) + backendIOWriteBytes = prometheus.NewDesc( + prometheus.BuildFQName(namespace, backendStatsSubsystem, "io_write_bytes_total"), + "Number of bytes written by backend (PostgreSQL 18+)", + []string{"pid", "backend_type", "io_context", "io_object"}, + prometheus.Labels{}, + ) + backendIOExtendBytes = prometheus.NewDesc( + prometheus.BuildFQName(namespace, 
backendStatsSubsystem, "io_extend_bytes_total"), + "Number of bytes extended by backend (PostgreSQL 18+)", + []string{"pid", "backend_type", "io_context", "io_object"}, + prometheus.Labels{}, + ) + + // Backend WAL statistics metrics (PostgreSQL 18+) + backendWALRecords = prometheus.NewDesc( + prometheus.BuildFQName(namespace, backendStatsSubsystem, "wal_records_total"), + "Number of WAL records generated by backend (PostgreSQL 18+)", + []string{"pid"}, + prometheus.Labels{}, + ) + backendWALBytes = prometheus.NewDesc( + prometheus.BuildFQName(namespace, backendStatsSubsystem, "wal_bytes_total"), + "Number of WAL bytes generated by backend (PostgreSQL 18+)", + []string{"pid"}, + prometheus.Labels{}, + ) + backendWALBuffersUsed = prometheus.NewDesc( + prometheus.BuildFQName(namespace, backendStatsSubsystem, "wal_buffers_used_total"), + "Number of WAL buffers used by backend (PostgreSQL 18+)", + []string{"pid"}, + prometheus.Labels{}, + ) + + // Backend I/O query for PostgreSQL 18+ + backendIOQuery = ` + SELECT + pid, + backend_type, + io_context, + io_object, + reads, + writes, + extends, + read_bytes, + write_bytes, + extend_bytes + FROM pg_stat_get_backend_io(NULL) + WHERE pid IS NOT NULL + ` + + // Backend WAL query for PostgreSQL 18+ + backendWALQuery = ` + SELECT + pid, + wal_records, + wal_bytes, + wal_buffers_used + FROM pg_stat_get_backend_wal(NULL) + WHERE pid IS NOT NULL + ` +) + +func (c *PGBackendStatsCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { + // Backend statistics functions were introduced in PostgreSQL 18 + if instance.version.LT(semver.Version{Major: 18}) { + return nil + } + + db := instance.getDB() + + // Collect backend I/O statistics + if err := c.collectBackendIO(ctx, db, ch); err != nil { + return err + } + + // Collect backend WAL statistics + if err := c.collectBackendWAL(ctx, db, ch); err != nil { + return err + } + + return nil +} + +func (c *PGBackendStatsCollector) collectBackendIO(ctx 
context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { + rows, err := db.QueryContext(ctx, backendIOQuery) + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + var pid sql.NullString + var backendType, ioContext, ioObject sql.NullString + var reads, writes, extends, readBytes, writeBytes, extendBytes sql.NullFloat64 + + err := rows.Scan( + &pid, + &backendType, + &ioContext, + &ioObject, + &reads, + &writes, + &extends, + &readBytes, + &writeBytes, + &extendBytes, + ) + if err != nil { + return err + } + + pidLabel := "unknown" + if pid.Valid { + pidLabel = pid.String + } + backendTypeLabel := "unknown" + if backendType.Valid { + backendTypeLabel = backendType.String + } + ioContextLabel := "unknown" + if ioContext.Valid { + ioContextLabel = ioContext.String + } + ioObjectLabel := "unknown" + if ioObject.Valid { + ioObjectLabel = ioObject.String + } + + labels := []string{pidLabel, backendTypeLabel, ioContextLabel, ioObjectLabel} + + if reads.Valid { + ch <- prometheus.MustNewConstMetric( + backendIOReads, + prometheus.CounterValue, + reads.Float64, + labels..., + ) + } + + if writes.Valid { + ch <- prometheus.MustNewConstMetric( + backendIOWrites, + prometheus.CounterValue, + writes.Float64, + labels..., + ) + } + + if extends.Valid { + ch <- prometheus.MustNewConstMetric( + backendIOExtends, + prometheus.CounterValue, + extends.Float64, + labels..., + ) + } + + if readBytes.Valid { + ch <- prometheus.MustNewConstMetric( + backendIOReadBytes, + prometheus.CounterValue, + readBytes.Float64, + labels..., + ) + } + + if writeBytes.Valid { + ch <- prometheus.MustNewConstMetric( + backendIOWriteBytes, + prometheus.CounterValue, + writeBytes.Float64, + labels..., + ) + } + + if extendBytes.Valid { + ch <- prometheus.MustNewConstMetric( + backendIOExtendBytes, + prometheus.CounterValue, + extendBytes.Float64, + labels..., + ) + } + } + + return nil +} + +func (c *PGBackendStatsCollector) collectBackendWAL(ctx context.Context, db 
*sql.DB, ch chan<- prometheus.Metric) error { + rows, err := db.QueryContext(ctx, backendWALQuery) + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + var pid sql.NullString + var walRecords, walBytes, walBuffersUsed sql.NullFloat64 + + err := rows.Scan( + &pid, + &walRecords, + &walBytes, + &walBuffersUsed, + ) + if err != nil { + return err + } + + pidLabel := "unknown" + if pid.Valid { + pidLabel = pid.String + } + + labels := []string{pidLabel} + + if walRecords.Valid { + ch <- prometheus.MustNewConstMetric( + backendWALRecords, + prometheus.CounterValue, + walRecords.Float64, + labels..., + ) + } + + if walBytes.Valid { + ch <- prometheus.MustNewConstMetric( + backendWALBytes, + prometheus.CounterValue, + walBytes.Float64, + labels..., + ) + } + + if walBuffersUsed.Valid { + ch <- prometheus.MustNewConstMetric( + backendWALBuffersUsed, + prometheus.CounterValue, + walBuffersUsed.Float64, + labels..., + ) + } + } + + return nil +} diff --git a/collector/pg_stat_bgwriter.go b/collector/pg_stat_bgwriter.go index 0b73b4f44..3606aa8c1 100644 --- a/collector/pg_stat_bgwriter.go +++ b/collector/pg_stat_bgwriter.go @@ -16,6 +16,7 @@ package collector import ( "context" "database/sql" + "github.com/blang/semver/v4" "github.com/prometheus/client_golang/prometheus" ) @@ -100,6 +101,18 @@ var ( []string{"collector", "server"}, prometheus.Labels{}, ) + statCheckpointerNumDoneDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_done_total"), + "Number of completed checkpoints (PostgreSQL 18+)", + []string{"collector", "server"}, + prometheus.Labels{}, + ) + statCheckpointerSlruWrittenDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "slru_written_total"), + "Number of SLRU buffers written during checkpoints (PostgreSQL 18+)", + []string{"collector", "server"}, + prometheus.Labels{}, + ) ) var statBGWriter = map[string]*prometheus.Desc{ "percona_checkpoints_timed": 
prometheus.NewDesc( @@ -191,7 +204,7 @@ const statBGWriterQueryPost17 = `SELECT ,stats_reset FROM pg_stat_bgwriter;` -const statCheckpointerQuery = `SELECT +const statCheckpointerQueryPre18 = `SELECT num_timed ,num_requested ,restartpoints_timed @@ -201,12 +214,28 @@ const statCheckpointerQuery = `SELECT ,sync_time ,buffers_written ,stats_reset + ,NULL::bigint as num_done + ,NULL::bigint as slru_written + FROM pg_stat_checkpointer;` + +const statCheckpointerQuery18Plus = `SELECT + num_timed + ,num_requested + ,restartpoints_timed + ,restartpoints_req + ,restartpoints_done + ,write_time + ,sync_time + ,buffers_written + ,stats_reset + ,num_done + ,slru_written FROM pg_stat_checkpointer;` func (p PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() - var cpt, cpr, bcp, bc, mwc, bb, bbf, ba sql.NullInt64 + var cpt, cpr, bcp, bc, mwc, bb, bbf, ba, numDone, slruWritten sql.NullInt64 var cpwt, cpst sql.NullFloat64 var sr sql.NullTime @@ -219,10 +248,15 @@ func (p PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, } var rpt, rpr, rpd sql.NullInt64 var csr sql.NullTime - // these variables are not used, but I left them here for reference - row = db.QueryRowContext(ctx, - statCheckpointerQuery) - err = row.Scan(&cpt, &cpr, &rpt, &rpr, &rpd, &cpwt, &cpst, &bcp, &csr) + + // Use version-specific checkpointer query for PostgreSQL 18+ + checkpointerQuery := statCheckpointerQueryPre18 + if instance.version.GTE(semver.Version{Major: 18}) { + checkpointerQuery = statCheckpointerQuery18Plus + } + + row = db.QueryRowContext(ctx, checkpointerQuery) + err = row.Scan(&cpt, &cpr, &rpt, &rpr, &rpd, &cpwt, &cpst, &bcp, &csr, &numDone, &slruWritten) if err != nil { return err } @@ -357,6 +391,29 @@ func (p PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, instance.name, ) + // PostgreSQL 18+ checkpointer metrics + if numDone.Valid { + numDoneMetric := 
float64(numDone.Int64) + ch <- prometheus.MustNewConstMetric( + statCheckpointerNumDoneDesc, + prometheus.CounterValue, + numDoneMetric, + "exporter", + instance.name, + ) + } + + if slruWritten.Valid { + slruWrittenMetric := float64(slruWritten.Int64) + ch <- prometheus.MustNewConstMetric( + statCheckpointerSlruWrittenDesc, + prometheus.CounterValue, + slruWrittenMetric, + "exporter", + instance.name, + ) + } + // TODO: analyze metrics below, why do we duplicate them? ch <- prometheus.MustNewConstMetric( diff --git a/collector/pg_stat_database.go b/collector/pg_stat_database.go index 328afee2c..f8a3aeef5 100644 --- a/collector/pg_stat_database.go +++ b/collector/pg_stat_database.go @@ -17,6 +17,7 @@ import ( "context" "database/sql" + "github.com/blang/semver/v4" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" @@ -206,8 +207,26 @@ var ( []string{"datid", "datname"}, prometheus.Labels{}, ) + statDatabaseParallelWorkersToLaunch = prometheus.NewDesc(prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "parallel_workers_to_launch", + ), + "Number of parallel workers to launch (PostgreSQL 18+)", + []string{"datid", "datname"}, + prometheus.Labels{}, + ) + statDatabaseParallelWorkersLaunched = prometheus.NewDesc(prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "parallel_workers_launched", + ), + "Number of parallel workers launched (PostgreSQL 18+)", + []string{"datid", "datname"}, + prometheus.Labels{}, + ) - statDatabaseQuery = ` + statDatabaseQueryPre18 = ` SELECT datid ,datname @@ -228,15 +247,48 @@ var ( ,blk_read_time ,blk_write_time ,stats_reset + ,NULL::bigint as parallel_workers_to_launch + ,NULL::bigint as parallel_workers_launched + FROM pg_stat_database; + ` + + statDatabaseQuery18Plus = ` + SELECT + datid + ,datname + ,numbackends + ,xact_commit + ,xact_rollback + ,blks_read + ,blks_hit + ,tup_returned + ,tup_fetched + ,tup_inserted + ,tup_updated + ,tup_deleted + ,conflicts + 
,temp_files + ,temp_bytes + ,deadlocks + ,blk_read_time + ,blk_write_time + ,stats_reset + ,parallel_workers_to_launch + ,parallel_workers_launched FROM pg_stat_database; ` ) func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() - rows, err := db.QueryContext(ctx, - statDatabaseQuery, - ) + + // Use version-specific query for PostgreSQL 18+ + query := statDatabaseQueryPre18 + if instance.version.GTE(semver.Version{Major: 18}) { + query = statDatabaseQuery18Plus + } + + rows, err := db.QueryContext(ctx, query) if err != nil { return err } @@ -246,6 +298,7 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance var datid, datname sql.NullString var numBackends, xactCommit, xactRollback, blksRead, blksHit, tupReturned, tupFetched, tupInserted, tupUpdated, tupDeleted, conflicts, tempFiles, tempBytes, deadlocks, blkReadTime, blkWriteTime sql.NullFloat64 var statsReset sql.NullTime + var parallelWorkersToLaunch, parallelWorkersLaunched sql.NullFloat64 err := rows.Scan( &datid, @@ -267,6 +320,8 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance &blkReadTime, &blkWriteTime, &statsReset, + ¶llelWorkersToLaunch, + ¶llelWorkersLaunched, ) if err != nil { return err @@ -473,6 +528,25 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance statsResetMetric, labels..., ) + + // PostgreSQL 18+ parallel worker metrics + if parallelWorkersToLaunch.Valid { + ch <- prometheus.MustNewConstMetric( + statDatabaseParallelWorkersToLaunch, + prometheus.CounterValue, + parallelWorkersToLaunch.Float64, + labels..., + ) + } + + if parallelWorkersLaunched.Valid { + ch <- prometheus.MustNewConstMetric( + statDatabaseParallelWorkersLaunched, + prometheus.CounterValue, + parallelWorkersLaunched.Float64, + labels..., + ) + } } return nil } diff --git a/collector/pg_stat_database_test.go b/collector/pg_stat_database_test.go 
index fe1b17066..90d442a5a 100644 --- a/collector/pg_stat_database_test.go +++ b/collector/pg_stat_database_test.go @@ -53,6 +53,8 @@ func TestPGStatDatabaseCollector(t *testing.T) { "blk_read_time", "blk_write_time", "stats_reset", + "parallel_workers_to_launch", + "parallel_workers_launched", } srT, err := time.Parse("2006-01-02 15:04:05.00000-07", "2023-05-25 17:10:42.81132-07") @@ -80,9 +82,12 @@ func TestPGStatDatabaseCollector(t *testing.T) { 925, 16, 823, - srT) + srT, + nil, + nil, + ) - mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows) + mock.ExpectQuery(sanitizeQuery(statDatabaseQueryPre18)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { @@ -160,6 +165,8 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) { "blk_read_time", "blk_write_time", "stats_reset", + "parallel_workers_to_launch", + "parallel_workers_launched", } rows := sqlmock.NewRows(columns). @@ -182,7 +189,10 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) { 925, 16, 823, - srT). + srT, + nil, + nil, + ). AddRow( "pid", "postgres", @@ -202,8 +212,11 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) { 925, 16, 823, - srT) - mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows) + srT, + nil, + nil, + ) + mock.ExpectQuery(sanitizeQuery(statDatabaseQueryPre18)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { @@ -276,6 +289,8 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) { "blk_read_time", "blk_write_time", "stats_reset", + "parallel_workers_to_launch", + "parallel_workers_launched", } srT, err := time.Parse("2006-01-02 15:04:05.00000-07", "2023-05-25 17:10:42.81132-07") @@ -303,7 +318,10 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) { 925, 16, 823, - srT). + srT, + nil, + nil, + ). AddRow( nil, nil, @@ -324,6 +342,8 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) { nil, nil, nil, + nil, + nil, ). 
AddRow( "pid", @@ -344,8 +364,11 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) { 926, 17, 824, - srT) - mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows) + srT, + nil, + nil, + ) + mock.ExpectQuery(sanitizeQuery(statDatabaseQueryPre18)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { @@ -436,6 +459,8 @@ func TestPGStatDatabaseCollectorTestNilStatReset(t *testing.T) { "blk_read_time", "blk_write_time", "stats_reset", + "parallel_workers_to_launch", + "parallel_workers_launched", } rows := sqlmock.NewRows(columns). @@ -458,9 +483,12 @@ func TestPGStatDatabaseCollectorTestNilStatReset(t *testing.T) { 925, 16, 823, - nil) + nil, + nil, + nil, + ) - mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows) + mock.ExpectQuery(sanitizeQuery(statDatabaseQueryPre18)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { diff --git a/collector/pg_stat_io.go b/collector/pg_stat_io.go new file mode 100644 index 000000000..70fe09bae --- /dev/null +++ b/collector/pg_stat_io.go @@ -0,0 +1,256 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "context" + "database/sql" + + "github.com/blang/semver/v4" + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" +) + +const statIOSubsystem = "stat_io" + +func init() { + registerCollector(statIOSubsystem, defaultDisabled, NewPGStatIOCollector) +} + +type PGStatIOCollector struct { + log log.Logger +} + +func NewPGStatIOCollector(config collectorConfig) (Collector, error) { + return &PGStatIOCollector{log: config.logger}, nil +} + +var ( + statIOReads = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statIOSubsystem, "reads_total"), + "Number of reads", + []string{"backend_type", "io_context", "io_object"}, + prometheus.Labels{}, + ) + statIOReadTime = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statIOSubsystem, "read_time_seconds_total"), + "Time spent reading", + []string{"backend_type", "io_context", "io_object"}, + prometheus.Labels{}, + ) + statIOWrites = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statIOSubsystem, "writes_total"), + "Number of writes", + []string{"backend_type", "io_context", "io_object"}, + prometheus.Labels{}, + ) + statIOWriteTime = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statIOSubsystem, "write_time_seconds_total"), + "Time spent writing", + []string{"backend_type", "io_context", "io_object"}, + prometheus.Labels{}, + ) + statIOExtends = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statIOSubsystem, "extends_total"), + "Number of extends", + []string{"backend_type", "io_context", "io_object"}, + prometheus.Labels{}, + ) + statIOReadBytes = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statIOSubsystem, "read_bytes_total"), + "Number of bytes read (PostgreSQL 18+)", + []string{"backend_type", "io_context", "io_object"}, + prometheus.Labels{}, + ) + statIOWriteBytes = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statIOSubsystem, "write_bytes_total"), + "Number of bytes written (PostgreSQL 18+)", + 
+		[]string{"backend_type", "io_context", "io_object"},
+		prometheus.Labels{},
+	)
+	statIOExtendBytes = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, statIOSubsystem, "extend_bytes_total"),
+		"Number of bytes extended (PostgreSQL 18+)",
+		[]string{"backend_type", "io_context", "io_object"},
+		prometheus.Labels{},
+	)
+
+	// PostgreSQL 18+ query with byte statistics and WAL I/O.
+	// Note: the pg_stat_io view names these columns "context" and "object";
+	// they are aliased here to match the exported label names.
+	StatIOQuery18Plus = `
+	SELECT
+		backend_type,
+		context AS io_context,
+		object AS io_object,
+		reads,
+		read_time,
+		writes,
+		write_time,
+		extends,
+		read_bytes,
+		write_bytes,
+		extend_bytes
+	FROM pg_stat_io
+	`
+
+	// Pre-PostgreSQL 18 query without byte statistics
+	StatIOQueryPre18 = `
+	SELECT
+		backend_type,
+		context AS io_context,
+		object AS io_object,
+		reads,
+		read_time,
+		writes,
+		write_time,
+		extends,
+		NULL::bigint as read_bytes,
+		NULL::bigint as write_bytes,
+		NULL::bigint as extend_bytes
+	FROM pg_stat_io
+	`
+)
+
+func (c *PGStatIOCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error {
+	// pg_stat_io was introduced in PostgreSQL 16
+	if instance.version.LT(semver.Version{Major: 16}) {
+		return nil
+	}
+
+	db := instance.getDB()
+
+	// Use version-specific query for PostgreSQL 18+
+	query := StatIOQueryPre18
+	if instance.version.GTE(semver.Version{Major: 18}) {
+		query = StatIOQuery18Plus
+	}
+
+	rows, err := db.QueryContext(ctx, query)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		var backendType, ioContext, ioObject sql.NullString
+		var reads, writes, extends, readBytes, writeBytes, extendBytes sql.NullFloat64
+		var readTime, writeTime sql.NullFloat64
+
+		err := rows.Scan(
+			&backendType,
+			&ioContext,
+			&ioObject,
+			&reads,
+			&readTime,
+			&writes,
+			&writeTime,
+			&extends,
+			&readBytes,
+			&writeBytes,
+			&extendBytes,
+		)
+		if err != nil {
+			return err
+		}
+
+		backendTypeLabel := "unknown"
+		if backendType.Valid {
+			backendTypeLabel = backendType.String
+		}
+		ioContextLabel := "unknown"
+		if ioContext.Valid {
ioContextLabel = ioContext.String + } + ioObjectLabel := "unknown" + if ioObject.Valid { + ioObjectLabel = ioObject.String + } + + labels := []string{backendTypeLabel, ioContextLabel, ioObjectLabel} + + if reads.Valid { + ch <- prometheus.MustNewConstMetric( + statIOReads, + prometheus.CounterValue, + reads.Float64, + labels..., + ) + } + + if readTime.Valid { + ch <- prometheus.MustNewConstMetric( + statIOReadTime, + prometheus.CounterValue, + readTime.Float64/1000.0, // Convert milliseconds to seconds + labels..., + ) + } + + if writes.Valid { + ch <- prometheus.MustNewConstMetric( + statIOWrites, + prometheus.CounterValue, + writes.Float64, + labels..., + ) + } + + if writeTime.Valid { + ch <- prometheus.MustNewConstMetric( + statIOWriteTime, + prometheus.CounterValue, + writeTime.Float64/1000.0, // Convert milliseconds to seconds + labels..., + ) + } + + if extends.Valid { + ch <- prometheus.MustNewConstMetric( + statIOExtends, + prometheus.CounterValue, + extends.Float64, + labels..., + ) + } + + // PostgreSQL 18+ byte statistics + if readBytes.Valid { + ch <- prometheus.MustNewConstMetric( + statIOReadBytes, + prometheus.CounterValue, + readBytes.Float64, + labels..., + ) + } + + if writeBytes.Valid { + ch <- prometheus.MustNewConstMetric( + statIOWriteBytes, + prometheus.CounterValue, + writeBytes.Float64, + labels..., + ) + } + + if extendBytes.Valid { + ch <- prometheus.MustNewConstMetric( + statIOExtendBytes, + prometheus.CounterValue, + extendBytes.Float64, + labels..., + ) + } + } + + return nil +} diff --git a/collector/pg_stat_io_test.go b/collector/pg_stat_io_test.go new file mode 100644 index 000000000..d478ab8cb --- /dev/null +++ b/collector/pg_stat_io_test.go @@ -0,0 +1,140 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + "context" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" +) + +func TestPGStatIOCollector(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("Error opening a stub database connection: %s", err) + } + defer db.Close() + + inst := &instance{db: db, version: semver.MustParse("16.0.0")} + + columns := []string{"backend_type", "io_context", "io_object", "reads", "read_time", "writes", "write_time", "extends", "read_bytes", "write_bytes", "extend_bytes"} + rows := sqlmock.NewRows(columns). 
+ AddRow("client backend", "normal", "relation", 100, 50.5, 75, 25.2, 10, nil, nil, nil) + mock.ExpectQuery("SELECT.*backend_type.*FROM pg_stat_io").WillReturnRows(rows) + + ch := make(chan prometheus.Metric) + go func() { + defer close(ch) + c := PGStatIOCollector{} + + if err := c.Update(context.Background(), inst, ch); err != nil { + t.Errorf("Error calling PGStatIOCollector.Update: %s", err) + } + }() + + expected := 5 // reads, read_time, writes, write_time, extends (no byte metrics for v16) + + metricCount := 0 + for m := range ch { + metricCount++ + _ = m + } + + if metricCount != expected { + t.Errorf("Expected %d metrics, got %d", expected, metricCount) + } + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("There were unfulfilled expectations: %s", err) + } +} + +func TestPGStatIOCollectorPostgreSQL18(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("Error opening a stub database connection: %s", err) + } + defer db.Close() + + inst := &instance{db: db, version: semver.MustParse("18.0.0")} + + columns := []string{"backend_type", "io_context", "io_object", "reads", "read_time", "writes", "write_time", "extends", "read_bytes", "write_bytes", "extend_bytes"} + rows := sqlmock.NewRows(columns). 
+ AddRow("client backend", "normal", "relation", 100, 50.5, 75, 25.2, 10, 1024, 2048, 512) + mock.ExpectQuery("SELECT.*backend_type.*FROM pg_stat_io").WillReturnRows(rows) + + ch := make(chan prometheus.Metric) + go func() { + defer close(ch) + c := PGStatIOCollector{} + + if err := c.Update(context.Background(), inst, ch); err != nil { + t.Errorf("Error calling PGStatIOCollector.Update: %s", err) + } + }() + + expected := 8 // reads, read_time, writes, write_time, extends, read_bytes, write_bytes, extend_bytes + + metricCount := 0 + for m := range ch { + metricCount++ + _ = m + } + + if metricCount != expected { + t.Errorf("Expected %d metrics, got %d", expected, metricCount) + } + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("There were unfulfilled expectations: %s", err) + } +} + +func TestPGStatIOCollectorPrePostgreSQL16(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("Error opening a stub database connection: %s", err) + } + defer db.Close() + + inst := &instance{db: db, version: semver.MustParse("15.0.0")} + + ch := make(chan prometheus.Metric) + go func() { + defer close(ch) + c := PGStatIOCollector{} + + if err := c.Update(context.Background(), inst, ch); err != nil { + t.Errorf("Error calling PGStatIOCollector.Update: %s", err) + } + }() + + // Should not make any queries for PostgreSQL < 16 + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("There were unfulfilled expectations: %s", err) + } + + metricCount := 0 + for m := range ch { + metricCount++ + _ = m + } + + if metricCount != 0 { + t.Errorf("Expected 0 metrics for PostgreSQL < 16, got %d", metricCount) + } +} diff --git a/collector/pg_stat_user_tables.go b/collector/pg_stat_user_tables.go index af3822ca8..9e75ec9b9 100644 --- a/collector/pg_stat_user_tables.go +++ b/collector/pg_stat_user_tables.go @@ -17,6 +17,7 @@ import ( "context" "database/sql" + "github.com/blang/semver/v4" "github.com/go-kit/log" 
"github.com/prometheus/client_golang/prometheus" ) @@ -156,8 +157,63 @@ var ( []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) + statUserTablesTotalVacuumTime = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "total_vacuum_time"), + "Time spent vacuuming this table, in milliseconds (PostgreSQL 18+)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + statUserTablesTotalAutovacuumTime = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "total_autovacuum_time"), + "Time spent auto-vacuuming this table, in milliseconds (PostgreSQL 18+)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + statUserTablesTotalAnalyzeTime = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "total_analyze_time"), + "Time spent analyzing this table, in milliseconds (PostgreSQL 18+)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + statUserTablesTotalAutoanalyzeTime = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "total_autoanalyze_time"), + "Time spent auto-analyzing this table, in milliseconds (PostgreSQL 18+)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesQueryPre18 = `SELECT + current_database() datname, + schemaname, + relname, + seq_scan, + seq_tup_read, + idx_scan, + idx_tup_fetch, + n_tup_ins, + n_tup_upd, + n_tup_del, + n_tup_hot_upd, + n_live_tup, + n_dead_tup, + n_mod_since_analyze, + COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum, + COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum, + COALESCE(last_analyze, '1970-01-01Z') as last_analyze, + COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze, + vacuum_count, + autovacuum_count, + analyze_count, + autoanalyze_count, + pg_total_relation_size(relid) as total_size, + NULL::double precision as total_vacuum_time, + NULL::double precision as 
total_autovacuum_time, + NULL::double precision as total_analyze_time, + NULL::double precision as total_autoanalyze_time + FROM + pg_stat_user_tables` - statUserTablesQuery = `SELECT + statUserTablesQuery18Plus = `SELECT current_database() datname, schemaname, relname, @@ -180,15 +236,25 @@ var ( autovacuum_count, analyze_count, autoanalyze_count, - pg_total_relation_size(relid) as total_size + pg_total_relation_size(relid) as total_size, + total_vacuum_time, + total_autovacuum_time, + total_analyze_time, + total_autoanalyze_time FROM pg_stat_user_tables` ) func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() - rows, err := db.QueryContext(ctx, - statUserTablesQuery) + + // Use version-specific query for PostgreSQL 18+ + query := statUserTablesQueryPre18 + if instance.version.GTE(semver.Version{Major: 18}) { + query = statUserTablesQuery18Plus + } + + rows, err := db.QueryContext(ctx, query) if err != nil { return err @@ -200,8 +266,9 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan var seqScan, seqTupRead, idxScan, idxTupFetch, nTupIns, nTupUpd, nTupDel, nTupHotUpd, nLiveTup, nDeadTup, nModSinceAnalyze, vacuumCount, autovacuumCount, analyzeCount, autoanalyzeCount, totalSize sql.NullInt64 var lastVacuum, lastAutovacuum, lastAnalyze, lastAutoanalyze sql.NullTime + var totalVacuumTime, totalAutovacuumTime, totalAnalyzeTime, totalAutoanalyzeTime sql.NullFloat64 - if err := rows.Scan(&datname, &schemaname, &relname, &seqScan, &seqTupRead, &idxScan, &idxTupFetch, &nTupIns, &nTupUpd, &nTupDel, &nTupHotUpd, &nLiveTup, &nDeadTup, &nModSinceAnalyze, &lastVacuum, &lastAutovacuum, &lastAnalyze, &lastAutoanalyze, &vacuumCount, &autovacuumCount, &analyzeCount, &autoanalyzeCount, &totalSize); err != nil { + if err := rows.Scan(&datname, &schemaname, &relname, &seqScan, &seqTupRead, &idxScan, &idxTupFetch, &nTupIns, &nTupUpd, &nTupDel, &nTupHotUpd, 
&nLiveTup, &nDeadTup, &nModSinceAnalyze, &lastVacuum, &lastAutovacuum, &lastAnalyze, &lastAutoanalyze, &vacuumCount, &autovacuumCount, &analyzeCount, &autoanalyzeCount, &totalSize, &totalVacuumTime, &totalAutovacuumTime, &totalAnalyzeTime, &totalAutoanalyzeTime); err != nil { return err } @@ -437,6 +504,43 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan totalSizeMetric, datnameLabel, schemanameLabel, relnameLabel, ) + + // PostgreSQL 18+ vacuum/analyze timing metrics + if totalVacuumTime.Valid { + ch <- prometheus.MustNewConstMetric( + statUserTablesTotalVacuumTime, + prometheus.CounterValue, + totalVacuumTime.Float64, + datnameLabel, schemanameLabel, relnameLabel, + ) + } + + if totalAutovacuumTime.Valid { + ch <- prometheus.MustNewConstMetric( + statUserTablesTotalAutovacuumTime, + prometheus.CounterValue, + totalAutovacuumTime.Float64, + datnameLabel, schemanameLabel, relnameLabel, + ) + } + + if totalAnalyzeTime.Valid { + ch <- prometheus.MustNewConstMetric( + statUserTablesTotalAnalyzeTime, + prometheus.CounterValue, + totalAnalyzeTime.Float64, + datnameLabel, schemanameLabel, relnameLabel, + ) + } + + if totalAutoanalyzeTime.Valid { + ch <- prometheus.MustNewConstMetric( + statUserTablesTotalAutoanalyzeTime, + prometheus.CounterValue, + totalAutoanalyzeTime.Float64, + datnameLabel, schemanameLabel, relnameLabel, + ) + } } if err := rows.Err(); err != nil { diff --git a/collector/pg_stat_user_tables_test.go b/collector/pg_stat_user_tables_test.go index 5e82335c3..338b24cc3 100644 --- a/collector/pg_stat_user_tables_test.go +++ b/collector/pg_stat_user_tables_test.go @@ -72,7 +72,12 @@ func TestPGStatUserTablesCollector(t *testing.T) { "autovacuum_count", "analyze_count", "autoanalyze_count", - "total_size"} + "total_size", + "total_vacuum_time", + "total_autovacuum_time", + "total_analyze_time", + "total_autoanalyze_time", + } rows := sqlmock.NewRows(columns). 
AddRow("postgres", "public", @@ -96,8 +101,13 @@ func TestPGStatUserTablesCollector(t *testing.T) { 12, 13, 14, - 15) - mock.ExpectQuery(sanitizeQuery(statUserTablesQuery)).WillReturnRows(rows) + 15, + nil, + nil, + nil, + nil, + ) + mock.ExpectQuery(sanitizeQuery(statUserTablesQueryPre18)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { defer close(ch) @@ -173,7 +183,12 @@ func TestPGStatUserTablesCollectorNullValues(t *testing.T) { "autovacuum_count", "analyze_count", "autoanalyze_count", - "total_size"} + "total_size", + "total_vacuum_time", + "total_autovacuum_time", + "total_analyze_time", + "total_autoanalyze_time", + } rows := sqlmock.NewRows(columns). AddRow("postgres", nil, @@ -197,8 +212,13 @@ func TestPGStatUserTablesCollectorNullValues(t *testing.T) { nil, nil, nil, - nil) - mock.ExpectQuery(sanitizeQuery(statUserTablesQuery)).WillReturnRows(rows) + nil, + nil, + nil, + nil, + nil, + ) + mock.ExpectQuery(sanitizeQuery(statUserTablesQueryPre18)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { defer close(ch) diff --git a/queries-lr.yaml b/queries-lr.yaml index 64ffdf824..5b2268a04 100644 --- a/queries-lr.yaml +++ b/queries-lr.yaml @@ -22,7 +22,19 @@ pg_stat_user_tables: vacuum_count, autovacuum_count, analyze_count, - autoanalyze_count + autoanalyze_count, + CASE WHEN current_setting('server_version_num')::int >= 180000 + THEN COALESCE(total_vacuum_time, 0) + ELSE 0 END as total_vacuum_time, + CASE WHEN current_setting('server_version_num')::int >= 180000 + THEN COALESCE(total_autovacuum_time, 0) + ELSE 0 END as total_autovacuum_time, + CASE WHEN current_setting('server_version_num')::int >= 180000 + THEN COALESCE(total_analyze_time, 0) + ELSE 0 END as total_analyze_time, + CASE WHEN current_setting('server_version_num')::int >= 180000 + THEN COALESCE(total_autoanalyze_time, 0) + ELSE 0 END as total_autoanalyze_time FROM pg_stat_user_tables metrics: @@ -92,6 +104,18 @@ pg_stat_user_tables: - autoanalyze_count: 
usage: "COUNTER" description: "Number of times this table has been analyzed by the autovacuum daemon" + - total_vacuum_time: + usage: "COUNTER" + description: "Time spent vacuuming this table, in milliseconds (PostgreSQL 18+)" + - total_autovacuum_time: + usage: "COUNTER" + description: "Time spent auto-vacuuming this table, in milliseconds (PostgreSQL 18+)" + - total_analyze_time: + usage: "COUNTER" + description: "Time spent analyzing this table, in milliseconds (PostgreSQL 18+)" + - total_autoanalyze_time: + usage: "COUNTER" + description: "Time spent auto-analyzing this table, in milliseconds (PostgreSQL 18+)" pg_statio_user_tables: query: "SELECT current_database() datname, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit FROM pg_statio_user_tables" diff --git a/queries-mr.yaml b/queries-mr.yaml index 700e74b65..d950d1554 100644 --- a/queries-mr.yaml +++ b/queries-mr.yaml @@ -26,6 +26,34 @@ pg_database: - size_bytes: usage: "GAUGE" description: "Disk space used by the database" + +# PostgreSQL 18+ enhanced database statistics +pg_stat_database_18: + query: | + SELECT + datname, + CASE WHEN current_setting('server_version_num')::int >= 180000 + THEN COALESCE(parallel_workers_to_launch, 0) + ELSE 0 END as parallel_workers_to_launch, + CASE WHEN current_setting('server_version_num')::int >= 180000 + THEN COALESCE(parallel_workers_launched, 0) + ELSE 0 END as parallel_workers_launched + FROM + pg_stat_database + WHERE + datname IS NOT NULL + master: true + cache_seconds: 30 + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - parallel_workers_to_launch: + usage: "COUNTER" + description: "Number of parallel workers to launch (PostgreSQL 18+)" + - parallel_workers_launched: + usage: "COUNTER" + description: "Number of parallel workers launched (PostgreSQL 18+)" #### #pg_stat_statements: # query: "SELECT t2.rolname, t3.datname, queryid, calls, 
total_time / 1000 as total_time_seconds, min_time / 1000 as min_time_seconds, max_time / 1000 as max_time_seconds, mean_time / 1000 as mean_time_seconds, stddev_time / 1000 as stddev_time_seconds, rows, shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written, local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written, temp_blks_read, temp_blks_written, blk_read_time / 1000 as blk_read_time_seconds, blk_write_time / 1000 as blk_write_time_seconds FROM pg_stat_statements t1 JOIN pg_roles t2 ON (t1.userid=t2.oid) JOIN pg_database t3 ON (t1.dbid=t3.oid) WHERE t2.rolname != 'rdsadmin'" diff --git a/tools/go.mod b/tools/go.mod index 63316e6a8..82f4b58d8 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -1,6 +1,7 @@ module github.com/percona/postgres_exporter/tools -go 1.22.1 +go 1.23.0 + toolchain go1.24.1 require (