From 105a5deff776243dddc0f24ccae7f1e5b47b0aba Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Wed, 31 May 2023 12:26:11 -0700 Subject: [PATCH 01/30] Move pg_post_master query to collector Signed-off-by: Felix Yuan --- collector/pg_postmaster.go | 58 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 collector/pg_postmaster.go diff --git a/collector/pg_postmaster.go b/collector/pg_postmaster.go new file mode 100644 index 000000000..bd3fb70e7 --- /dev/null +++ b/collector/pg_postmaster.go @@ -0,0 +1,58 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "context" + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +func init() { + registerCollector("postmaster", defaultEnabled, NewPGPostmasterCollector) +} + +type PGPostmasterCollector struct { +} + +func NewPGPostmasterCollector(collectorConfig) (Collector, error) { + return &PGPostmasterCollector{}, nil +} + +var pgPostmaster = map[string]*prometheus.Desc{ + "start_time_seconds": prometheus.NewDesc( + "pg_postmaster_start_time_seconds", + "Time at which postmaster started", + []string{"process_name"}, nil, + ), +} + +func (c *PGPostmasterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { + row := db.QueryRowContext(ctx, + `SELECT + pg_postmaster_start_time + from pg_postmaster_start_time();`) + + var startTimeSeconds float64 + err := row.Scan(&startTimeSeconds) + if err != nil { + return err + } + ch <- prometheus.MustNewConstMetric( + pgPostmaster["start_time_seconds"], + prometheus.GaugeValue, startTimeSeconds, "postmaster", + ) + return nil +} From a94380741b61f6d6e466bded074c8805e674ac17 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Wed, 31 May 2023 12:26:46 -0700 Subject: [PATCH 02/30] move pg_replication to collector Signed-off-by: Felix Yuan --- collector/pg_replication.go | 60 +++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 collector/pg_replication.go diff --git a/collector/pg_replication.go b/collector/pg_replication.go new file mode 100644 index 000000000..8c242264d --- /dev/null +++ b/collector/pg_replication.go @@ -0,0 +1,60 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + "context" + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +func init() { + registerCollector("replication", defaultEnabled, NewPGReplicationCollector) +} + +type PGReplicationCollector struct { +} + +func NewPGReplicationCollector(collectorConfig) (Collector, error) { + return &PGReplicationCollector{}, nil +} + +var pgReplication = map[string]*prometheus.Desc{ + "replication_lag": prometheus.NewDesc( + "pg_replication_lag", + "Replication lag behind master in seconds", + []string{"process_name"}, nil, + ), +} + +func (c *PGReplicationCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { + row := db.QueryRowContext(ctx, + `SELECT + CASE + WHEN NOT pg_is_in_recovery() THEN 0 + ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) + END AS lag`) + + var lag float64 + err := row.Scan(&lag) + if err != nil { + return err + } + ch <- prometheus.MustNewConstMetric( + pgReplication["replication_lag"], + prometheus.GaugeValue, lag, "replication", + ) + return nil +} From 91bd0523fdd4479c43a46a7baa8443087fa9a12a Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Wed, 31 May 2023 12:36:00 -0700 Subject: [PATCH 03/30] Move pg_stat_user_tables query to collector Signed-off-by: Felix Yuan --- collector/pg_stat_user_tables.go | 229 +++++++++++++++++++++++++++++++ 1 file changed, 229 insertions(+) create mode 100644 collector/pg_stat_user_tables.go diff --git a/collector/pg_stat_user_tables.go b/collector/pg_stat_user_tables.go new file mode 
100644 index 000000000..59dc9a1f8 --- /dev/null +++ b/collector/pg_stat_user_tables.go @@ -0,0 +1,229 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + "context" + "database/sql" + "time" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" +) + +func init() { + registerCollector("user_tables", defaultEnabled, NewPGStatUserTablesCollector) +} + +type PGStatUserTablesCollector struct { + log log.Logger +} + +var userTableSubsystem = "stat_user_tables" + +func NewPGStatUserTablesCollector(config collectorConfig) (Collector, error) { + return &PGStatUserTablesCollector{log: config.logger}, nil +} + +var statUserTables = map[string]*prometheus.Desc{ + "seq_scan": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "seq_scan"), + "Number of sequential scans initiated on this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), +} + +func (c *PGStatUserTablesCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { + rows, err := db.QueryContext(ctx, + `SELECT + current_database() datname, + schemaname, + relname, + seq_scan, + seq_tup_read, + idx_scan, + idx_tup_fetch, + n_tup_ins, + n_tup_upd, + n_tup_del, + n_tup_hot_upd, + n_live_tup, + n_dead_tup, + n_mod_since_analyze, + COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum, + COALESCE(last_autovacuum, '1970-01-01Z') as 
last_autovacuum, + COALESCE(last_analyze, '1970-01-01Z') as last_analyze, + COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze, + vacuum_count, + autovacuum_count, + analyze_count, + autoanalyze_count + FROM + pg_stat_user_tables`) + + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + var datname string + var schemaname string + var relname string + var seqScan int64 + var seqTupRead int64 + var idxScan int64 + var idxTupFetch int64 + var nTupIns int64 + var nTupUpd int64 + var nTupDel int64 + var nTupHotUpd int64 + var nLiveTup int64 + var nDeadTup int64 + var nModSinceAnalyze int64 + var lastVacuum time.Time + var lastAutovacuum time.Time + var lastAnalyze time.Time + var lastAutoanalyze time.Time + var vacuumCount int64 + var autovacuumCount int64 + var analyzeCount int64 + var autoanalyzeCount int64 + + if err := rows.Scan(&datname, &schemaname, &relname, &seqScan, &seqTupRead, &idxScan, &idxTupFetch, &nTupIns, &nTupUpd, &nTupDel, &nTupHotUpd, &nLiveTup, &nDeadTup, &nModSinceAnalyze, &lastVacuum, &lastAutovacuum, &lastAnalyze, &lastAutoanalyze, &vacuumCount, &autovacuumCount, &analyzeCount, &autoanalyzeCount); err != nil { + return err + } + + ch <- prometheus.MustNewConstMetric( + statUserTables["seq_scan"], + prometheus.CounterValue, + float64(seqScan), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["seq_tup_read"], + prometheus.CounterValue, + float64(seqTupRead), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["idx_scan"], + prometheus.CounterValue, + float64(idxScan), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["idx_tup_fetch"], + prometheus.CounterValue, + float64(idxTupFetch), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["n_tup_ins"], + prometheus.CounterValue, + float64(nTupIns), + datname, schemaname, relname, + ) + ch <- 
prometheus.MustNewConstMetric( + statUserTables["n_tup_upd"], + prometheus.CounterValue, + float64(nTupUpd), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["n_tup_del"], + prometheus.CounterValue, + float64(nTupDel), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["n_tup_hot_upd"], + prometheus.CounterValue, + float64(nTupHotUpd), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["n_live_tup"], + prometheus.GaugeValue, + float64(nLiveTup), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["n_dead_tup"], + prometheus.GaugeValue, + float64(nDeadTup), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["n_mod_since_analyze"], + prometheus.GaugeValue, + float64(nModSinceAnalyze), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["last_vacuum"], + prometheus.GaugeValue, + float64(lastVacuum.Unix()), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["last_autovacuum"], + prometheus.GaugeValue, + float64(lastAutovacuum.Unix()), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["last_analyze"], + prometheus.GaugeValue, + float64(lastAnalyze.Unix()), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["last_autoanalyze"], + prometheus.GaugeValue, + float64(lastAutoanalyze.Unix()), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["vacuum_count"], + prometheus.CounterValue, + float64(vacuumCount), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["autovacuum_count"], + prometheus.CounterValue, + float64(autovacuumCount), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + 
statUserTables["analyze_count"], + prometheus.CounterValue, + float64(analyzeCount), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statUserTables["autoanalyze_count"], + prometheus.CounterValue, + float64(autoanalyzeCount), + datname, schemaname, relname, + ) + } + + if err := rows.Err(); err != nil { + return err + } + return nil +} From 11438ac30807869fc38b3fef51e46db438db0bfa Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Wed, 31 May 2023 12:48:18 -0700 Subject: [PATCH 04/30] Forgot to finish adding all the extra vars Signed-off-by: Felix Yuan --- collector/pg_stat_user_tables.go | 108 +++++++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) diff --git a/collector/pg_stat_user_tables.go b/collector/pg_stat_user_tables.go index 59dc9a1f8..ea187676e 100644 --- a/collector/pg_stat_user_tables.go +++ b/collector/pg_stat_user_tables.go @@ -43,6 +43,114 @@ var statUserTables = map[string]*prometheus.Desc{ []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ), + "seq_tup_read": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "seq_tup_read"), + "Number of live rows fetched by sequential scans", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "idx_scan": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "idx_scan"), + "Number of index scans initiated on this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "idx_tup_fetch": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "idx_tup_fetch"), + "Number of live rows fetched by index scans", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "n_tup_ins": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_ins"), + "Number of rows inserted", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "n_tup_upd": prometheus.NewDesc( + 
prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_upd"), + "Number of rows updated", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "n_tup_del": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_del"), + "Number of rows deleted", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "n_tup_hot_upd": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_hot_upd"), + "Number of rows HOT updated (i.e., with no separate index update required)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "n_live_tup": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_live_tup"), + "Estimated number of live rows", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "n_dead_tup": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_dead_tup"), + "Estimated number of dead rows", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "n_mod_since_analyze": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_mod_since_analyze"), + "Estimated number of rows changed since last analyze", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "last_vacuum": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_vacuum"), + "Last time at which this table was manually vacuumed (not counting VACUUM FULL)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "last_autovacuum": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_autovacuum"), + "Last time at which this table was vacuumed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "last_analyze": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_analyze"), + "Last 
time at which this table was manually analyzed", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "last_autoanalyze": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_autoanalyze"), + "Last time at which this table was analyzed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "vacuum_count": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "vacuum_count"), + "Number of times this table has been manually vacuumed (not counting VACUUM FULL)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "autovacuum_count": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "autovacuum_count"), + "Number of times this table has been vacuumed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "analyze_count": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "analyze_count"), + "Number of times this table has been manually analyzed", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "autoanalyze_count": prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "autoanalyze_count"), + "Number of times this table has been analyzed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), } func (c *PGStatUserTablesCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { From 8892838a2266c5c4217e13f7f4dca60119759cf5 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Wed, 31 May 2023 13:08:44 -0700 Subject: [PATCH 05/30] Move statio_user_tables to collector Signed-off-by: Felix Yuan --- collector/pg_statio_user_tables.go | 180 +++++++++++++++++++++++++++++ 1 file changed, 180 insertions(+) create mode 100644 collector/pg_statio_user_tables.go diff --git a/collector/pg_statio_user_tables.go 
b/collector/pg_statio_user_tables.go new file mode 100644 index 000000000..7434fc734 --- /dev/null +++ b/collector/pg_statio_user_tables.go @@ -0,0 +1,180 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + "context" + "database/sql" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" +) + +func init() { + registerCollector("statio_user_tables", defaultEnabled, NewPGStatIOUserTablesCollector) +} + +type PGStatIOUserTablesCollector struct { + log log.Logger +} + +var statioUserTableSubsystem = "statio_user_tables" + +func NewPGStatIOUserTablesCollector(config collectorConfig) (Collector, error) { + return &PGStatIOUserTablesCollector{log: config.logger}, nil +} + +var statioUserTables = map[string]*prometheus.Desc{ + "heap_blks_read": prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_read"), + "Number of disk blocks read from this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "heap_blks_hit": prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_hit"), + "Number of buffer hits in this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "idx_blks_read": prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_read"), + "Number of disk blocks read from all indexes on 
this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "idx_blks_hit": prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_hit"), + "Number of buffer hits in all indexes on this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "toast_blks_read": prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_read"), + "Number of disk blocks read from this table's TOAST table (if any)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "toast_blks_hit": prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_hit"), + "Number of buffer hits in this table's TOAST table (if any)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "tidx_blks_read": prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_read"), + "Number of disk blocks read from this table's TOAST table indexes (if any)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), + "tidx_blks_hit": prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_hit"), + "Number of buffer hits in this table's TOAST table indexes (if any)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ), +} + +func (PGStatIOUserTablesCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { + rows, err := db.QueryContext(ctx, + `SELECT + current_database() datname, + schemaname, + relname, + heap_blks_read, + heap_blks_hit, + idx_blks_read, + idx_blks_hit, + toast_blks_read, + toast_blks_hit, + tidx_blks_read, + tidx_blks_hit + FROM pg_statio_user_tables`) + + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + var datname string + var schemaname string + var relname string + var heapBlksRead int64 + var heapBlksHit 
int64 + var idxBlksRead int64 + var idxBlksHit int64 + var toastBlksRead int64 + var toastBlksHit int64 + var tidxBlksRead int64 + var tidxBlksHit int64 + + if err := rows.Scan(&datname, &schemaname, &relname, &heapBlksRead, &heapBlksHit, &idxBlksRead, &idxBlksHit, &toastBlksRead, &toastBlksHit, &tidxBlksRead, &tidxBlksHit); err != nil { + return err + } + + ch <- prometheus.MustNewConstMetric( + statioUserTables["heap_blks_read"], + prometheus.CounterValue, + float64(heapBlksRead), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statioUserTables["heap_blks_hit"], + prometheus.CounterValue, + float64(heapBlksHit), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statioUserTables["idx_blks_read"], + prometheus.CounterValue, + float64(idxBlksRead), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statioUserTables["idx_blks_hit"], + prometheus.CounterValue, + float64(idxBlksHit), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statioUserTables["toast_blks_read"], + prometheus.CounterValue, + float64(toastBlksRead), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statioUserTables["toast_blks_hit"], + prometheus.CounterValue, + float64(toastBlksHit), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statioUserTables["tidx_blks_read"], + prometheus.CounterValue, + float64(tidxBlksRead), + datname, schemaname, relname, + ) + ch <- prometheus.MustNewConstMetric( + statioUserTables["tidx_blks_hit"], + prometheus.CounterValue, + float64(tidxBlksHit), + datname, schemaname, relname, + ) + } + if err := rows.Err(); err != nil { + return err + } + return nil +} From 1a46234f1bff75fbefba078b0dbc08e41fa257b2 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Wed, 31 May 2023 13:23:36 -0700 Subject: [PATCH 06/30] Move pg_stat_statements to collectors Signed-off-by: Felix Yuan --- collector/collector.go | 4 +- 
collector/pg_stat_statements.go | 150 ++++++++++++++++++++++++++++++++ 2 files changed, 152 insertions(+), 2 deletions(-) create mode 100644 collector/pg_stat_statements.go diff --git a/collector/collector.go b/collector/collector.go index e8e418d12..d50e1e72a 100644 --- a/collector/collector.go +++ b/collector/collector.go @@ -39,8 +39,8 @@ const ( // Namespace for all metrics. namespace = "pg" - defaultEnabled = true - // defaultDisabled = false + defaultEnabled = true + defaultDisabled = false ) var ( diff --git a/collector/pg_stat_statements.go b/collector/pg_stat_statements.go new file mode 100644 index 000000000..df8b98994 --- /dev/null +++ b/collector/pg_stat_statements.go @@ -0,0 +1,150 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "context" + "database/sql" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" +) + +func init() { + // WARNING: + // Disabled by default because this set of metrics can be quite expensive on a busy server + // Every unique query will cause a new timeseries to be created + registerCollector("statements", defaultDisabled, NewPGStatStatementsCollector) +} + +type PGStatStatementsCollector struct { + log log.Logger +} + +var statStatementsSubsystem = "stat_statements" + +func NewPGStatStatementsCollector(config collectorConfig) (Collector, error) { + return &PGStatStatementsCollector{log: config.logger}, nil +} + +var statStatements = map[string]*prometheus.Desc{ + "calls_total": prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "calls_total"), + "Number of times executed", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, + ), + "seconds_total": prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "seconds_total"), + "Total time spent in the statement, in seconds", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, + ), + "rows_total": prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "rows_total"), + "Total number of rows retrieved or affected by the statement", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, + ), + "block_read_seconds_total": prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_read_seconds_total"), + "Total time the statement spent reading blocks, in seconds", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, + ), + "block_write_seconds_total": prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_write_seconds_total"), + "Total time the statement spent writing blocks, in seconds", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, + ), +} + +func 
(PGStatStatementsCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { + rows, err := db.QueryContext(ctx, + `SELECT + pg_get_userbyid(userid) as user, + pg_database.datname, + pg_stat_statements.queryid, + pg_stat_statements.calls as calls_total, + pg_stat_statements.total_time / 1000.0 as seconds_total, + pg_stat_statements.rows as rows_total, + pg_stat_statements.blk_read_time / 1000.0 as block_read_seconds_total, + pg_stat_statements.blk_write_time / 1000.0 as block_write_seconds_total + FROM pg_stat_statements + JOIN pg_database + ON pg_database.oid = pg_stat_statements.dbid + WHERE + total_time > ( + SELECT percentile_cont(0.1) + WITHIN GROUP (ORDER BY total_time) + FROM pg_stat_statements + ) + ORDER BY seconds_total DESC + LIMIT 100`) + + if err != nil { + return err + } + defer rows.Close() + for rows.Next() { + var user string + var datname string + var queryid string + var callsTotal int64 + var secondsTotal float64 + var rowsTotal int64 + var blockReadSecondsTotal float64 + var blockWriteSecondsTotal float64 + + if err := rows.Scan(&user, &datname, &queryid, &callsTotal, &secondsTotal, &rowsTotal, &blockReadSecondsTotal, &blockWriteSecondsTotal); err != nil { + return err + } + + ch <- prometheus.MustNewConstMetric( + statStatements["calls_total"], + prometheus.CounterValue, + float64(callsTotal), + user, datname, queryid, + ) + ch <- prometheus.MustNewConstMetric( + statStatements["seconds_total"], + prometheus.CounterValue, + secondsTotal, + user, datname, queryid, + ) + ch <- prometheus.MustNewConstMetric( + statStatements["rows_total"], + prometheus.CounterValue, + float64(rowsTotal), + user, datname, queryid, + ) + ch <- prometheus.MustNewConstMetric( + statStatements["block_read_seconds_total"], + prometheus.CounterValue, + blockReadSecondsTotal, + user, datname, queryid, + ) + ch <- prometheus.MustNewConstMetric( + statStatements["block_write_seconds_total"], + prometheus.CounterValue, + blockWriteSecondsTotal, + 
user, datname, queryid, + ) + } + if err := rows.Err(); err != nil { + return err + } + return nil +} From 6d3bb7dd55e155261ed8dddcb7ebdf8a2b637795 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Wed, 31 May 2023 13:44:44 -0700 Subject: [PATCH 07/30] Move pg_process_idle to collectors Signed-off-by: Felix Yuan --- collector/pg_process_idle.go | 108 +++++++++++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 collector/pg_process_idle.go diff --git a/collector/pg_process_idle.go b/collector/pg_process_idle.go new file mode 100644 index 000000000..07d53d633 --- /dev/null +++ b/collector/pg_process_idle.go @@ -0,0 +1,108 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "context" + "database/sql" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" +) + +func init() { + registerCollector("process_idle", defaultEnabled, NewPGProcessIdleCollector) +} + +type PGProcessIdleCollector struct { + log log.Logger +} + +var processIdleSubsystem = "process_idle" + +func NewPGProcessIdleCollector(config collectorConfig) (Collector, error) { + return &PGProcessIdleCollector{log: config.logger}, nil +} + +var processIdle = map[string]*prometheus.Desc{ + "seconds": prometheus.NewDesc( + prometheus.BuildFQName(namespace, processIdleSubsystem, "seconds"), + "Idle time of server processes", + []string{"application_name"}, + prometheus.Labels{}, + ), +} + +func (PGProcessIdleCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { + row := db.QueryRowContext(ctx, + `WITH + metrics AS ( + SELECT + application_name, + SUM(EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change))::bigint)::float AS process_idle_seconds_sum, + COUNT(*) AS process_idle_seconds_count + FROM pg_stat_activity + WHERE state = 'idle' + GROUP BY application_name + ), + buckets AS ( + SELECT + application_name, + le, + SUM( + CASE WHEN EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change)) <= le + THEN 1 + ELSE 0 + END + )::bigint AS bucket + FROM + pg_stat_activity, + UNNEST(ARRAY[1, 2, 5, 15, 30, 60, 90, 120, 300]) AS le + GROUP BY application_name, le + ORDER BY application_name, le + ) + SELECT + application_name, + process_idle_seconds_sum as seconds_sum, + process_idle_seconds_count as seconds_count, + ARRAY_AGG(le) AS seconds, + ARRAY_AGG(bucket) AS seconds_bucket + FROM metrics JOIN buckets USING (application_name) + GROUP BY 1, 2, 3;`) + + var applicationName string + var secondsSum int64 + var secondsCount uint64 + var seconds []int64 + var secondsBucket []uint64 + + err := row.Scan(&applicationName, &secondsSum, &secondsCount, &seconds, &secondsBucket) + + var buckets = 
make(map[float64]uint64, len(seconds)) + for i, second := range seconds { + if i >= len(secondsBucket) { + break + } + buckets[float64(second)] = secondsBucket[i] + } + if err != nil { + return err + } + ch <- prometheus.MustNewConstHistogram( + processIdle["seconds"], + secondsCount, float64(secondsSum), buckets, + applicationName, + ) + return nil +} From d0f4c26c58a1a81114e7f48dc704bddf28ddcc70 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Wed, 31 May 2023 13:49:37 -0700 Subject: [PATCH 08/30] Remove all queries from queries.yaml Signed-off-by: Felix Yuan --- queries.yaml | 244 --------------------------------------------------- 1 file changed, 244 deletions(-) diff --git a/queries.yaml b/queries.yaml index 1e54f326d..e69de29bb 100644 --- a/queries.yaml +++ b/queries.yaml @@ -1,244 +0,0 @@ -pg_replication: - query: "SELECT CASE WHEN NOT pg_is_in_recovery() THEN 0 ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) END AS lag" - master: true - metrics: - - lag: - usage: "GAUGE" - description: "Replication lag behind master in seconds" - -pg_postmaster: - query: "SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()" - master: true - metrics: - - start_time_seconds: - usage: "GAUGE" - description: "Time at which postmaster started" - -pg_stat_user_tables: - query: | - SELECT - current_database() datname, - schemaname, - relname, - seq_scan, - seq_tup_read, - idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_live_tup, - n_dead_tup, - n_mod_since_analyze, - COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum, - COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum, - COALESCE(last_analyze, '1970-01-01Z') as last_analyze, - COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze, - vacuum_count, - autovacuum_count, - analyze_count, - autoanalyze_count - FROM - pg_stat_user_tables - metrics: - - datname: - usage: "LABEL" - description: "Name of current 
database" - - schemaname: - usage: "LABEL" - description: "Name of the schema that this table is in" - - relname: - usage: "LABEL" - description: "Name of this table" - - seq_scan: - usage: "COUNTER" - description: "Number of sequential scans initiated on this table" - - seq_tup_read: - usage: "COUNTER" - description: "Number of live rows fetched by sequential scans" - - idx_scan: - usage: "COUNTER" - description: "Number of index scans initiated on this table" - - idx_tup_fetch: - usage: "COUNTER" - description: "Number of live rows fetched by index scans" - - n_tup_ins: - usage: "COUNTER" - description: "Number of rows inserted" - - n_tup_upd: - usage: "COUNTER" - description: "Number of rows updated" - - n_tup_del: - usage: "COUNTER" - description: "Number of rows deleted" - - n_tup_hot_upd: - usage: "COUNTER" - description: "Number of rows HOT updated (i.e., with no separate index update required)" - - n_live_tup: - usage: "GAUGE" - description: "Estimated number of live rows" - - n_dead_tup: - usage: "GAUGE" - description: "Estimated number of dead rows" - - n_mod_since_analyze: - usage: "GAUGE" - description: "Estimated number of rows changed since last analyze" - - last_vacuum: - usage: "GAUGE" - description: "Last time at which this table was manually vacuumed (not counting VACUUM FULL)" - - last_autovacuum: - usage: "GAUGE" - description: "Last time at which this table was vacuumed by the autovacuum daemon" - - last_analyze: - usage: "GAUGE" - description: "Last time at which this table was manually analyzed" - - last_autoanalyze: - usage: "GAUGE" - description: "Last time at which this table was analyzed by the autovacuum daemon" - - vacuum_count: - usage: "COUNTER" - description: "Number of times this table has been manually vacuumed (not counting VACUUM FULL)" - - autovacuum_count: - usage: "COUNTER" - description: "Number of times this table has been vacuumed by the autovacuum daemon" - - analyze_count: - usage: "COUNTER" - description: "Number of 
times this table has been manually analyzed" - - autoanalyze_count: - usage: "COUNTER" - description: "Number of times this table has been analyzed by the autovacuum daemon" - -pg_statio_user_tables: - query: "SELECT current_database() datname, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit FROM pg_statio_user_tables" - metrics: - - datname: - usage: "LABEL" - description: "Name of current database" - - schemaname: - usage: "LABEL" - description: "Name of the schema that this table is in" - - relname: - usage: "LABEL" - description: "Name of this table" - - heap_blks_read: - usage: "COUNTER" - description: "Number of disk blocks read from this table" - - heap_blks_hit: - usage: "COUNTER" - description: "Number of buffer hits in this table" - - idx_blks_read: - usage: "COUNTER" - description: "Number of disk blocks read from all indexes on this table" - - idx_blks_hit: - usage: "COUNTER" - description: "Number of buffer hits in all indexes on this table" - - toast_blks_read: - usage: "COUNTER" - description: "Number of disk blocks read from this table's TOAST table (if any)" - - toast_blks_hit: - usage: "COUNTER" - description: "Number of buffer hits in this table's TOAST table (if any)" - - tidx_blks_read: - usage: "COUNTER" - description: "Number of disk blocks read from this table's TOAST table indexes (if any)" - - tidx_blks_hit: - usage: "COUNTER" - description: "Number of buffer hits in this table's TOAST table indexes (if any)" - -# -# WARNING: -# This set of metrics can be very expensive on a busy server as every -# unique query executed will create an additional time series -# -# pg_stat_statements: -# query: | -# SELECT -# pg_get_userbyid(userid) as user, -# pg_database.datname, -# pg_stat_statements.queryid, -# pg_stat_statements.calls as calls_total, -# pg_stat_statements.total_time / 1000.0 as seconds_total, -# pg_stat_statements.rows as rows_total, -# 
pg_stat_statements.blk_read_time / 1000.0 as block_read_seconds_total, -# pg_stat_statements.blk_write_time / 1000.0 as block_write_seconds_total -# FROM pg_stat_statements -# JOIN pg_database -# ON pg_database.oid = pg_stat_statements.dbid -# WHERE -# total_time > ( -# SELECT percentile_cont(0.1) -# WITHIN GROUP (ORDER BY total_time) -# FROM pg_stat_statements -# ) -# ORDER BY seconds_total DESC -# LIMIT 100 -# metrics: -# - user: -# usage: "LABEL" -# description: "The user who executed the statement" -# - datname: -# usage: "LABEL" -# description: "The database in which the statement was executed" -# - queryid: -# usage: "LABEL" -# description: "Internal hash code, computed from the statement's parse tree" -# - calls_total: -# usage: "COUNTER" -# description: "Number of times executed" -# - seconds_total: -# usage: "COUNTER" -# description: "Total time spent in the statement, in seconds" -# - rows_total: -# usage: "COUNTER" -# description: "Total number of rows retrieved or affected by the statement" -# - block_read_seconds_total: -# usage: "COUNTER" -# description: "Total time the statement spent reading blocks, in seconds" -# - block_write_seconds_total: -# usage: "COUNTER" -# description: "Total time the statement spent writing blocks, in seconds" - -pg_process_idle: - query: | - WITH - metrics AS ( - SELECT - application_name, - SUM(EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change))::bigint)::float AS process_idle_seconds_sum, - COUNT(*) AS process_idle_seconds_count - FROM pg_stat_activity - WHERE state = 'idle' - GROUP BY application_name - ), - buckets AS ( - SELECT - application_name, - le, - SUM( - CASE WHEN EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change)) <= le - THEN 1 - ELSE 0 - END - )::bigint AS bucket - FROM - pg_stat_activity, - UNNEST(ARRAY[1, 2, 5, 15, 30, 60, 90, 120, 300]) AS le - GROUP BY application_name, le - ORDER BY application_name, le - ) - SELECT - application_name, - process_idle_seconds_sum as seconds_sum, - 
process_idle_seconds_count as seconds_count, - ARRAY_AGG(le) AS seconds, - ARRAY_AGG(bucket) AS seconds_bucket - FROM metrics JOIN buckets USING (application_name) - GROUP BY 1, 2, 3 - metrics: - - application_name: - usage: "LABEL" - description: "Application Name" - - seconds: - usage: "HISTOGRAM" - description: "Idle time of server processes" From 610fffb21aadc087224073fdc9c9d421fb368032 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Thu, 1 Jun 2023 02:38:52 -0700 Subject: [PATCH 09/30] Fix pgreplication Signed-off-by: Felix Yuan --- collector/pg_replication.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/collector/pg_replication.go b/collector/pg_replication.go index 8c242264d..a1948d29c 100644 --- a/collector/pg_replication.go +++ b/collector/pg_replication.go @@ -53,7 +53,7 @@ func (c *PGReplicationCollector) Update(ctx context.Context, db *sql.DB, ch chan return err } ch <- prometheus.MustNewConstMetric( - pgPostmaster["replication_lag"], + pgReplication["replication_lag"], prometheus.GaugeValue, lag, "replication", ) return nil From 15d2426947b77a94b6bcd9f81ff3bc2130992960 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Thu, 1 Jun 2023 12:52:40 -0700 Subject: [PATCH 10/30] Add is replica to replication metrics Signed-off-by: Felix Yuan --- collector/pg_replication.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/collector/pg_replication.go b/collector/pg_replication.go index a1948d29c..b2a78c0d9 100644 --- a/collector/pg_replication.go +++ b/collector/pg_replication.go @@ -37,6 +37,11 @@ var pgReplication = map[string]*prometheus.Desc{ "Replication lag behind master in seconds", []string{"process_name"}, nil, ), + "is_replica": prometheus.NewDesc( + "pg_replication_is_replica", + "Indicates if the server is a replica", + []string{"process_name"}, nil, + ), } func (c *PGReplicationCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { @@ -45,10 +50,15 @@ func (c 
*PGReplicationCollector) Update(ctx context.Context, db *sql.DB, ch chan CASE WHEN NOT pg_is_in_recovery() THEN 0 ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) - END AS lag`) + END AS lag, + CASE + WHEN pg_is_in_recovery() THEN 1 + ELSE 0 + END as is_replica`) var lag float64 - err := row.Scan(&lag) + var isReplica int64 + err := row.Scan(&lag, &isReplica) if err != nil { return err } @@ -56,5 +66,9 @@ func (c *PGReplicationCollector) Update(ctx context.Context, db *sql.DB, ch chan pgReplication["replication_lag"], prometheus.GaugeValue, lag, "replication", ) + ch <- prometheus.MustNewConstMetric( + pgReplication["is_replica"], + prometheus.GaugeValue, float64(isReplica), "replication", + ) return nil } From 6a358b8887c31e89259fa5c08f2d8ce3fe1d5923 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 12:31:30 -0700 Subject: [PATCH 11/30] Add note to queries.yaml Signed-off-by: Felix Yuan --- queries.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/queries.yaml b/queries.yaml index e69de29bb..189ce0866 100644 --- a/queries.yaml +++ b/queries.yaml @@ -0,0 +1,2 @@ +# Adding queries to this file is deprecated +# Example queries have been transformed into collectors. 
\ No newline at end of file From 741d6e60874019670f073549c3371d98ac15c711 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 13:23:29 -0700 Subject: [PATCH 12/30] Change desc pattern to use variables instead of a map Signed-off-by: Felix Yuan --- collector/pg_database.go | 4 +- collector/pg_postmaster.go | 16 +- collector/pg_process_idle.go | 16 +- collector/pg_replication.go | 31 ++-- collector/pg_stat_bgwriter.go | 164 +++++++++-------- collector/pg_stat_statements.go | 76 ++++---- collector/pg_stat_user_tables.go | 286 +++++++++++++++-------------- collector/pg_statio_user_tables.go | 121 ++++++------ collector/replication_slots.go | 38 ++-- 9 files changed, 389 insertions(+), 363 deletions(-) diff --git a/collector/pg_database.go b/collector/pg_database.go index 0d21385bf..60c333900 100644 --- a/collector/pg_database.go +++ b/collector/pg_database.go @@ -41,7 +41,7 @@ func NewPGDatabaseCollector(config collectorConfig) (Collector, error) { }, nil } -var pgDatabaseSizeDesc = prometheus.NewDesc( +var pgDatabaseSizeBytes = prometheus.NewDesc( "pg_database_size_bytes", "Disk space used by the database", []string{"datname"}, nil, @@ -94,7 +94,7 @@ func (c PGDatabaseCollector) Update(ctx context.Context, db *sql.DB, ch chan<- p } ch <- prometheus.MustNewConstMetric( - pgDatabaseSizeDesc, + pgDatabaseSizeBytes, prometheus.GaugeValue, float64(size), datname, ) } diff --git a/collector/pg_postmaster.go b/collector/pg_postmaster.go index bd3fb70e7..db57837f6 100644 --- a/collector/pg_postmaster.go +++ b/collector/pg_postmaster.go @@ -31,13 +31,11 @@ func NewPGPostmasterCollector(collectorConfig) (Collector, error) { return &PGPostmasterCollector{}, nil } -var pgPostmaster = map[string]*prometheus.Desc{ - "start_time_seconds": prometheus.NewDesc( - "pg_postmaster_start_time_seconds", - "Time at which postmaster started", - []string{"process_name"}, nil, - ), -} +var pgPostMasterStartTimeSeconds = prometheus.NewDesc( + "pg_postmaster_start_time_seconds", + 
"Time at which postmaster started", + []string{}, nil, +) func (c *PGPostmasterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { row := db.QueryRowContext(ctx, @@ -51,8 +49,8 @@ func (c *PGPostmasterCollector) Update(ctx context.Context, db *sql.DB, ch chan< return err } ch <- prometheus.MustNewConstMetric( - pgPostmaster["start_time_seconds"], - prometheus.GaugeValue, startTimeSeconds, "postmaster", + pgPostMasterStartTimeSeconds, + prometheus.GaugeValue, startTimeSeconds, ) return nil } diff --git a/collector/pg_process_idle.go b/collector/pg_process_idle.go index 07d53d633..d9c88471b 100644 --- a/collector/pg_process_idle.go +++ b/collector/pg_process_idle.go @@ -35,14 +35,12 @@ func NewPGProcessIdleCollector(config collectorConfig) (Collector, error) { return &PGProcessIdleCollector{log: config.logger}, nil } -var processIdle = map[string]*prometheus.Desc{ - "seconds": prometheus.NewDesc( - prometheus.BuildFQName(namespace, processIdleSubsystem, "seconds"), - "Idle time of server processes", - []string{"application_name"}, - prometheus.Labels{}, - ), -} +var pgProcessIdleSeconds = prometheus.NewDesc( + prometheus.BuildFQName(namespace, processIdleSubsystem, "seconds"), + "Idle time of server processes", + []string{"application_name"}, + prometheus.Labels{}, +) func (PGProcessIdleCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { row := db.QueryRowContext(ctx, @@ -100,7 +98,7 @@ func (PGProcessIdleCollector) Update(ctx context.Context, db *sql.DB, ch chan<- return err } ch <- prometheus.MustNewConstHistogram( - processIdle["seconds"], + pgProcessIdleSeconds, secondsCount, float64(secondsSum), buckets, applicationName, ) diff --git a/collector/pg_replication.go b/collector/pg_replication.go index b2a78c0d9..f736dbb1a 100644 --- a/collector/pg_replication.go +++ b/collector/pg_replication.go @@ -31,18 +31,17 @@ func NewPGReplicationCollector(collectorConfig) (Collector, error) { return 
&PGPostmasterCollector{}, nil } -var pgReplication = map[string]*prometheus.Desc{ - "replication_lag": prometheus.NewDesc( - "pg_replication_lag", - "Replication lag behind master in seconds", - []string{"process_name"}, nil, - ), - "is_replica": prometheus.NewDesc( - "pg_replication_is_replica", - "Indicates if the server is a replica", - []string{"process_name"}, nil, - ), -} +var pgReplicationLag = prometheus.NewDesc( + "pg_replication_lag", + "Replication lag behind master in seconds", + []string{}, nil, +) + +var pgReplicationIsReplica = prometheus.NewDesc( + "pg_replication_is_replica", + "Indicates if the server is a replica", + []string{}, nil, +) func (c *PGReplicationCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { row := db.QueryRowContext(ctx, @@ -63,12 +62,12 @@ func (c *PGReplicationCollector) Update(ctx context.Context, db *sql.DB, ch chan return err } ch <- prometheus.MustNewConstMetric( - pgReplication["replication_lag"], - prometheus.GaugeValue, lag, "replication", + pgReplicationLag, + prometheus.GaugeValue, lag, ) ch <- prometheus.MustNewConstMetric( - pgReplication["is_replica"], - prometheus.GaugeValue, float64(isReplica), "replication", + pgReplicationIsReplica, + prometheus.GaugeValue, float64(isReplica), ) return nil } diff --git a/collector/pg_stat_bgwriter.go b/collector/pg_stat_bgwriter.go index 46e0baf4b..2b0d411c5 100644 --- a/collector/pg_stat_bgwriter.go +++ b/collector/pg_stat_bgwriter.go @@ -34,73 +34,81 @@ func NewPGStatBGWriterCollector(collectorConfig) (Collector, error) { const bgWriterSubsystem = "stat_bgwriter" -var ( - statBGWriterCheckpointsTimedDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_timed_total"), - "Number of scheduled checkpoints that have been performed", - []string{}, - prometheus.Labels{}, - ) - statBGWriterCheckpointsReqDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, 
"checkpoints_req_total"), - "Number of requested checkpoints that have been performed", - []string{}, - prometheus.Labels{}, - ) - statBGWriterCheckpointsReqTimeDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_write_time_total"), - "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds", - []string{}, - prometheus.Labels{}, - ) - statBGWriterCheckpointsSyncTimeDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_sync_time_total"), - "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds", - []string{}, - prometheus.Labels{}, - ) - statBGWriterBuffersCheckpointDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_checkpoint_total"), - "Number of buffers written during checkpoints", - []string{}, - prometheus.Labels{}, - ) - statBGWriterBuffersCleanDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_clean_total"), - "Number of buffers written by the background writer", - []string{}, - prometheus.Labels{}, - ) - statBGWriterMaxwrittenCleanDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "maxwritten_clean_total"), - "Number of times the background writer stopped a cleaning scan because it had written too many buffers", - []string{}, - prometheus.Labels{}, - ) - statBGWriterBuffersBackendDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_total"), - "Number of buffers written directly by a backend", - []string{}, - prometheus.Labels{}, - ) - statBGWriterBuffersBackendFsyncDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_fsync_total"), - "Number of times a backend had to execute its own fsync call (normally the background writer 
handles those even when the backend does its own write)", - []string{}, - prometheus.Labels{}, - ) - statBGWriterBuffersAllocDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_alloc_total"), - "Number of buffers allocated", - []string{}, - prometheus.Labels{}, - ) - statBGWriterStatsResetDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "stats_reset_total"), - "Time at which these statistics were last reset", - []string{}, - prometheus.Labels{}, - ) +var statBGWriterCheckpointsTimed = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_timed_total"), + "Number of scheduled checkpoints that have been performed", + []string{}, + prometheus.Labels{}, +) + +var statBGWriterCheckpointsReq = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_req_total"), + "Number of requested checkpoints that have been performed", + []string{}, + prometheus.Labels{}, +) + +var statBGWriterCheckpointWriteTime = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_write_time_total"), + "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds", + []string{}, + prometheus.Labels{}, +) + +var statBGWriterCheckpointSyncTime = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_sync_time_total"), + "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds", + []string{}, + prometheus.Labels{}, +) + +var statBGWriterBuffersCheckpoint = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_checkpoint_total"), + "Number of buffers written during checkpoints", + []string{}, + prometheus.Labels{}, +) + +var statBGWriterBuffersClean = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, 
"buffers_clean_total"), + "Number of buffers written by the background writer", + []string{}, + prometheus.Labels{}, +) + +var statBGWriterMaxWrittenClean = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "maxwritten_clean_total"), + "Number of times the background writer stopped a cleaning scan because it had written too many buffers", + []string{}, + prometheus.Labels{}, +) + +var statBGWriterBuffersBackend = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_total"), + "Number of buffers written directly by a backend", + []string{}, + prometheus.Labels{}, +) + +var statBGWriterBuffersBackendFsync = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_fsync_total"), + "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)", + []string{}, + prometheus.Labels{}, +) + +var statBGWriterBuffersAlloc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_alloc_total"), + "Number of buffers allocated", + []string{}, + prometheus.Labels{}, +) + +var statBGWriterStatsReset = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "stats_reset_total"), + "Time at which these statistics were last reset", + []string{}, + prometheus.Labels{}, ) func (PGStatBGWriterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { @@ -137,57 +145,57 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- } ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointsTimedDesc, + statBGWriterCheckpointsTimed, prometheus.CounterValue, float64(cpt), ) ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointsReqDesc, + statBGWriterCheckpointsReq, prometheus.CounterValue, float64(cpr), ) ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointsReqTimeDesc, + 
statBGWriterCheckpointWriteTime, prometheus.CounterValue, float64(cpwt), ) ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointsSyncTimeDesc, + statBGWriterCheckpointSyncTime, prometheus.CounterValue, float64(cpst), ) ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersCheckpointDesc, + statBGWriterBuffersCheckpoint, prometheus.CounterValue, float64(bcp), ) ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersCleanDesc, + statBGWriterBuffersClean, prometheus.CounterValue, float64(bc), ) ch <- prometheus.MustNewConstMetric( - statBGWriterMaxwrittenCleanDesc, + statBGWriterMaxWrittenClean, prometheus.CounterValue, float64(mwc), ) ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersBackendDesc, + statBGWriterBuffersBackend, prometheus.CounterValue, float64(bb), ) ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersBackendFsyncDesc, + statBGWriterBuffersBackendFsync, prometheus.CounterValue, float64(bbf), ) ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersAllocDesc, + statBGWriterBuffersAlloc, prometheus.CounterValue, float64(ba), ) ch <- prometheus.MustNewConstMetric( - statBGWriterStatsResetDesc, + statBGWriterStatsReset, prometheus.CounterValue, float64(sr.Unix()), ) diff --git a/collector/pg_stat_statements.go b/collector/pg_stat_statements.go index df8b98994..8b4dff5c3 100644 --- a/collector/pg_stat_statements.go +++ b/collector/pg_stat_statements.go @@ -38,38 +38,40 @@ func NewPGStatStatementsCollector(config collectorConfig) (Collector, error) { return &PGStatStatementsCollector{log: config.logger}, nil } -var statStatements = map[string]*prometheus.Desc{ - "calls_total": prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "calls_total"), - "Number of times executed", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ), - "seconds_total": prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "seconds_total"), - "Total time spent in the statement, in 
seconds", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ), - "rows_total": prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "rows_total"), - "Total number of rows retrieved or affected by the statement", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ), - "block_read_seconds_total": prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_read_seconds_total"), - "Total time the statement spent reading blocks, in seconds", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ), - "block_write_seconds_total": prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_write_seconds_total"), - "Total time the statement spent writing blocks, in seconds", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ), -} +var statSTatementsCallsTotal = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "calls_total"), + "Number of times executed", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, +) + +var statStatementsSecondsTotal = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "seconds_total"), + "Total time spent in the statement, in seconds", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, +) + +var statStatementsRowsTotal = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "rows_total"), + "Total number of rows retrieved or affected by the statement", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, +) + +var statStatementsBlockReadSecondsTotal = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_read_seconds_total"), + "Total time the statement spent reading blocks, in seconds", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, +) + +var statStatementsBlockWriteSecondsTotal = prometheus.NewDesc( + 
prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_write_seconds_total"), + "Total time the statement spent writing blocks, in seconds", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, +) func (PGStatStatementsCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { rows, err := db.QueryContext(ctx, @@ -113,31 +115,31 @@ func (PGStatStatementsCollector) Update(ctx context.Context, db *sql.DB, ch chan } ch <- prometheus.MustNewConstMetric( - statStatements["calls_total"], + statSTatementsCallsTotal, prometheus.CounterValue, float64(callsTotal), user, datname, queryid, ) ch <- prometheus.MustNewConstMetric( - statStatements["seconds_total"], + statStatementsSecondsTotal, prometheus.CounterValue, secondsTotal, user, datname, queryid, ) ch <- prometheus.MustNewConstMetric( - statStatements["rows_total"], + statStatementsRowsTotal, prometheus.CounterValue, float64(rowsTotal), user, datname, queryid, ) ch <- prometheus.MustNewConstMetric( - statStatements["block_read_seconds_total"], + statStatementsBlockReadSecondsTotal, prometheus.CounterValue, blockReadSecondsTotal, user, datname, queryid, ) ch <- prometheus.MustNewConstMetric( - statStatements["block_write_seconds_total"], + statStatementsBlockWriteSecondsTotal, prometheus.CounterValue, blockWriteSecondsTotal, user, datname, queryid, diff --git a/collector/pg_stat_user_tables.go b/collector/pg_stat_user_tables.go index ea187676e..00c0ed2de 100644 --- a/collector/pg_stat_user_tables.go +++ b/collector/pg_stat_user_tables.go @@ -36,122 +36,138 @@ func NewPGStatUserTablesCollector(config collectorConfig) (Collector, error) { return &PGStatUserTablesCollector{log: config.logger}, nil } -var statUserTables = map[string]*prometheus.Desc{ - "seq_scan": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "seq_scan"), - "Number of sequential scans initiated on this table", - []string{"datname", "schemaname", "relname"}, - 
prometheus.Labels{}, - ), - "seq_tup_read": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "seq_tup_read"), - "Number of live rows fetched by sequential scans", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "idx_scan": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "idx_scan"), - "Number of index scans initiated on this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "idx_tup_fetch": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "idx_tup_fetch"), - "Number of live rows fetched by index scans", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "n_tup_ins": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_ins"), - "Number of rows inserted", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "n_tup_upd": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_upd"), - "Number of rows updated", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "n_tup_del": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_del"), - "Number of rows deleted", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "n_tup_hot_upd": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_hot_upd"), - "Number of rows HOT updated (i.e., with no separate index update required)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "n_live_tup": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_live_tup"), - "Estimated number of live rows", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "n_dead_tup": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_dead_tup"), - "Estimated number of dead rows", - 
[]string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "n_mod_since_analyze": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_mod_since_analyze"), - "Estimated number of rows changed since last analyze", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "last_vacuum": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_vacuum"), - "Last time at which this table was manually vacuumed (not counting VACUUM FULL)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "last_autovacuum": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_autovacuum"), - "Last time at which this table was vacuumed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "last_analyze": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_analyze"), - "Last time at which this table was manually analyzed", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "last_autoanalyze": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_autoanalyze"), - "Last time at which this table was analyzed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "vacuum_count": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "vacuum_count"), - "Number of times this table has been manually vacuumed (not counting VACUUM FULL)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "autovacuum_count": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "autovacuum_count"), - "Number of times this table has been vacuumed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "analyze_count": prometheus.NewDesc( - prometheus.BuildFQName(namespace, 
userTableSubsystem, "analyze_count"), - "Number of times this table has been manually analyzed", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "autoanalyze_count": prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "autoanalyze_count"), - "Number of times this table has been analyzed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), -} +var statUserTablesSeqScan = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "seq_scan"), + "Number of sequential scans initiated on this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesSeqTupRead = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "seq_tup_read"), + "Number of live rows fetched by sequential scans", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesIdxScan = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "idx_scan"), + "Number of index scans initiated on this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesIdxTupFetch = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "idx_tup_fetch"), + "Number of live rows fetched by index scans", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesNTupIns = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_ins"), + "Number of rows inserted", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesNTupUpd = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_upd"), + "Number of rows updated", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesNTupDel = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, 
"n_tup_del"), + "Number of rows deleted", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesNTupHotUpd = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_hot_upd"), + "Number of rows HOT updated (i.e., with no separate index update required)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesNLiveTup = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_live_tup"), + "Estimated number of live rows", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesNDeadTup = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_dead_tup"), + "Estimated number of dead rows", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesNModSinceAnalyze = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_mod_since_analyze"), + "Estimated number of rows changed since last analyze", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesLastVacuum = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_vacuum"), + "Last time at which this table was manually vacuumed (not counting VACUUM FULL)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesLastAutovacuum = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_autovacuum"), + "Last time at which this table was vacuumed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesLastAnalyze = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_analyze"), + "Last time at which this table was manually analyzed", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesLastAutoanalyze = 
prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_autoanalyze"), + "Last time at which this table was analyzed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesVacuumCount = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "vacuum_count"), + "Number of times this table has been manually vacuumed (not counting VACUUM FULL)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesAutovacuumCount = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "autovacuum_count"), + "Number of times this table has been vacuumed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesAnalyzeCount = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "analyze_count"), + "Number of times this table has been manually analyzed", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statUserTablesAutoanalyzeCount = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "autoanalyze_count"), + "Number of times this table has been analyzed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) func (c *PGStatUserTablesCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { rows, err := db.QueryContext(ctx, @@ -215,115 +231,115 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, db *sql.DB, ch c } ch <- prometheus.MustNewConstMetric( - statUserTables["seq_scan"], + statUserTablesSeqScan, prometheus.CounterValue, float64(seqScan), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["seq_tup_read"], + statUserTablesSeqTupRead, prometheus.CounterValue, float64(seqTupRead), datname, schemaname, relname, ) ch <- 
prometheus.MustNewConstMetric( - statUserTables["idx_scan"], + statUserTablesIdxScan, prometheus.CounterValue, float64(idxScan), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["idx_tup_fetch"], + statUserTablesIdxTupFetch, prometheus.CounterValue, float64(idxTupFetch), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["n_tup_ins"], + statUserTablesNTupIns, prometheus.CounterValue, float64(nTupIns), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["n_tup_upd"], + statUserTablesNTupUpd, prometheus.CounterValue, float64(nTupUpd), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["n_tup_del"], + statUserTablesNTupDel, prometheus.CounterValue, float64(nTupDel), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["n_tup_hot_upd"], + statUserTablesNTupHotUpd, prometheus.CounterValue, float64(nTupHotUpd), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["n_live_tup"], + statUserTablesNLiveTup, prometheus.GaugeValue, float64(nLiveTup), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["n_dead_tup"], + statUserTablesNDeadTup, prometheus.GaugeValue, float64(nDeadTup), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["n_mod_since_analyze"], + statUserTablesNModSinceAnalyze, prometheus.GaugeValue, float64(nModSinceAnalyze), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["last_vacuum"], + statUserTablesLastVacuum, prometheus.GaugeValue, float64(lastVacuum.Unix()), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["last_autovacuum"], + statUserTablesLastAutovacuum, prometheus.GaugeValue, float64(lastAutovacuum.Unix()), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["last_analyze"], 
+ statUserTablesLastAnalyze, prometheus.GaugeValue, float64(lastAnalyze.Unix()), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["last_autoanalyze"], + statUserTablesLastAutoanalyze, prometheus.GaugeValue, float64(lastAutoanalyze.Unix()), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["vacuum_count"], + statUserTablesVacuumCount, prometheus.CounterValue, float64(vacuumCount), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["autovacuum_count"], + statUserTablesAutovacuumCount, prometheus.CounterValue, float64(autovacuumCount), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["analyze_count"], + statUserTablesAnalyzeCount, prometheus.CounterValue, float64(analyzeCount), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statUserTables["autoanalyze_count"], + statUserTablesAutoanalyzeCount, prometheus.CounterValue, float64(autoanalyzeCount), datname, schemaname, relname, diff --git a/collector/pg_statio_user_tables.go b/collector/pg_statio_user_tables.go index 7434fc734..9ab3d8d93 100644 --- a/collector/pg_statio_user_tables.go +++ b/collector/pg_statio_user_tables.go @@ -35,56 +35,61 @@ func NewPGStatIOUserTablesCollector(config collectorConfig) (Collector, error) { return &PGStatIOUserTablesCollector{log: config.logger}, nil } -var statioUserTables = map[string]*prometheus.Desc{ - "heap_blks_read": prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_read"), - "Number of disk blocks read from this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "heap_blks_hit": prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_hit"), - "Number of buffer hits in this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "idx_blks_read": prometheus.NewDesc( - 
prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_read"), - "Number of disk blocks read from all indexes on this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "idx_blks_hit": prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_hit"), - "Number of buffer hits in all indexes on this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "toast_blks_read": prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_read"), - "Number of disk blocks read from this table's TOAST table (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "toast_blks_hit": prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_hit"), - "Number of buffer hits in this table's TOAST table (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "tidx_blks_read": prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_read"), - "Number of disk blocks read from this table's TOAST table indexes (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), - "tidx_blks_hit": prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_hit"), - "Number of buffer hits in this table's TOAST table indexes (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ), -} +var statioUserTablesHeapBlksRead = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_read"), + "Number of disk blocks read from this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statioUserTablesHeapBlksHit = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_hit"), + "Number of buffer hits in this table", + 
[]string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statioUserTablesIdxBlksRead = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_read"), + "Number of disk blocks read from all indexes on this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statioUserTablesIdxBlksHit = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_hit"), + "Number of buffer hits in all indexes on this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statioUserTablesToastBlksRead = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_read"), + "Number of disk blocks read from this table's TOAST table (if any)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statioUserTablesToastBlksHit = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_hit"), + "Number of buffer hits in this table's TOAST table (if any)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statioUserTablesTidxBlksRead = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_read"), + "Number of disk blocks read from this table's TOAST table indexes (if any)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) + +var statioUserTablesTidxBlksHit = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_hit"), + "Number of buffer hits in this table's TOAST table indexes (if any)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, +) func (PGStatIOUserTablesCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { rows, err := db.QueryContext(ctx, @@ -125,49 +130,49 @@ func (PGStatIOUserTablesCollector) Update(ctx context.Context, db 
*sql.DB, ch ch } ch <- prometheus.MustNewConstMetric( - statioUserTables["heap_blks_read"], + statioUserTablesHeapBlksRead, prometheus.CounterValue, float64(heapBlksRead), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statioUserTables["heap_blks_hit"], + statioUserTablesHeapBlksHit, prometheus.CounterValue, float64(heapBlksHit), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statioUserTables["idx_blks_read"], + statioUserTablesIdxBlksRead, prometheus.CounterValue, float64(idxBlksRead), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statioUserTables["idx_blks_hit"], + statioUserTablesIdxBlksHit, prometheus.CounterValue, float64(idxBlksHit), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statioUserTables["toast_blks_read"], + statioUserTablesToastBlksRead, prometheus.CounterValue, float64(toastBlksRead), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statioUserTables["toast_blks_hit"], + statioUserTablesToastBlksHit, prometheus.CounterValue, float64(toastBlksHit), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statioUserTables["tidx_blks_read"], + statioUserTablesTidxBlksRead, prometheus.CounterValue, float64(tidxBlksRead), datname, schemaname, relname, ) ch <- prometheus.MustNewConstMetric( - statioUserTables["tidx_blks_hit"], + statioUserTablesTidxBlksHit, prometheus.CounterValue, float64(tidxBlksHit), datname, schemaname, relname, diff --git a/collector/replication_slots.go b/collector/replication_slots.go index 3b3572726..d3e91755d 100644 --- a/collector/replication_slots.go +++ b/collector/replication_slots.go @@ -33,22 +33,22 @@ func NewPGReplicationSlotCollector(config collectorConfig) (Collector, error) { return &PGReplicationSlotCollector{log: config.logger}, nil } -var ( - pgReplicationSlotCurrentWalDesc = prometheus.NewDesc( - "pg_replication_slot_current_wal_lsn", - "current wal lsn value", - 
[]string{"slot_name"}, nil, - ) - pgReplicationSlotCurrentFlushDesc = prometheus.NewDesc( - "pg_replication_slot_confirmed_flush_lsn", - "last lsn confirmed flushed to the replication slot", - []string{"slot_name"}, nil, - ) - pgReplicationSlotIsActiveDesc = prometheus.NewDesc( - "pg_replication_slot_is_active", - "last lsn confirmed flushed to the replication slot", - []string{"slot_name"}, nil, - ) +var pgReplicationSlotCurrentWalLSN = prometheus.NewDesc( + "pg_replication_slot_current_wal_lsn", + "current wal lsn value", + []string{"slot_name"}, nil, +) + +var pgReplicationSlotConfirmedFlushLSN = prometheus.NewDesc( + "pg_replication_slot_confirmed_flush_lsn", + "last lsn confirmed flushed to the replication slot", + []string{"slot_name"}, nil, +) + +var pgReplicationSlotActive = prometheus.NewDesc( + "pg_replication_slot_is_active", + "last lsn confirmed flushed to the replication slot", + []string{"slot_name"}, nil, ) func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { @@ -75,17 +75,17 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch cha } ch <- prometheus.MustNewConstMetric( - pgReplicationSlotCurrentWalDesc, + pgReplicationSlotCurrentWalLSN, prometheus.GaugeValue, float64(wal_lsn), slot_name, ) if is_active { ch <- prometheus.MustNewConstMetric( - pgReplicationSlotCurrentFlushDesc, + pgReplicationSlotConfirmedFlushLSN, prometheus.GaugeValue, float64(flush_lsn), slot_name, ) } ch <- prometheus.MustNewConstMetric( - pgReplicationSlotIsActiveDesc, + pgReplicationSlotActive, prometheus.GaugeValue, float64(flush_lsn), slot_name, ) } From 865c4c5272399146dacec957f5e1d97f27e2eb63 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 13:58:49 -0700 Subject: [PATCH 13/30] Add first set of tests + some helper functions cribbed from mysqld_exporter Signed-off-by: Felix Yuan --- collector/collector_test.go | 44 ++++++++++++++++++++++++++++++++ collector/pg_database.go | 9 
++++--- collector/pg_database_test.go | 47 +++++++++++++++++++++++++++++++++++ go.mod | 5 ++++ go.sum | 10 ++++++++ 5 files changed, 111 insertions(+), 4 deletions(-) create mode 100644 collector/collector_test.go create mode 100644 collector/pg_database_test.go diff --git a/collector/collector_test.go b/collector/collector_test.go new file mode 100644 index 000000000..8450920e1 --- /dev/null +++ b/collector/collector_test.go @@ -0,0 +1,44 @@ +package collector + +import ( + "strings" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +type labelMap map[string]string + +type MetricResult struct { + labels labelMap + value float64 + metricType dto.MetricType +} + +func readMetric(m prometheus.Metric) MetricResult { + pb := &dto.Metric{} + m.Write(pb) + labels := make(labelMap, len(pb.Label)) + for _, v := range pb.Label { + labels[v.GetName()] = v.GetValue() + } + if pb.Gauge != nil { + return MetricResult{labels: labels, value: pb.GetGauge().GetValue(), metricType: dto.MetricType_GAUGE} + } + if pb.Counter != nil { + return MetricResult{labels: labels, value: pb.GetCounter().GetValue(), metricType: dto.MetricType_COUNTER} + } + if pb.Untyped != nil { + return MetricResult{labels: labels, value: pb.GetUntyped().GetValue(), metricType: dto.MetricType_UNTYPED} + } + panic("Unsupported metric type") +} + +func sanitizeQuery(q string) string { + q = strings.Join(strings.Fields(q), " ") + q = strings.Replace(q, "(", "\\(", -1) + q = strings.Replace(q, ")", "\\)", -1) + q = strings.Replace(q, "*", "\\*", -1) + q = strings.Replace(q, "$", "\\$", -1) + return q +} diff --git a/collector/pg_database.go b/collector/pg_database.go index 60c333900..d6fc5bda1 100644 --- a/collector/pg_database.go +++ b/collector/pg_database.go @@ -47,6 +47,9 @@ var pgDatabaseSizeBytes = prometheus.NewDesc( []string{"datname"}, nil, ) +var pgDatabaseQuery = "SELECT pg_database.datname FROM pg_database;" +var pgDatabaseSizeQuery = "SELECT 
pg_database_size($1)" + // Update implements Collector and exposes database size. // It is called by the Prometheus registry when collecting metrics. // The list of databases is retrieved from pg_database and filtered @@ -58,9 +61,7 @@ var pgDatabaseSizeBytes = prometheus.NewDesc( func (c PGDatabaseCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { // Query the list of databases rows, err := db.QueryContext(ctx, - `SELECT pg_database.datname - FROM pg_database; - `, + pgDatabaseQuery, ) if err != nil { return err @@ -88,7 +89,7 @@ func (c PGDatabaseCollector) Update(ctx context.Context, db *sql.DB, ch chan<- p // Query the size of the databases for _, datname := range databases { var size int64 - err = db.QueryRowContext(ctx, "SELECT pg_database_size($1)", datname).Scan(&size) + err = db.QueryRowContext(ctx, pgDatabaseSizeQuery, datname).Scan(&size) if err != nil { return err } diff --git a/collector/pg_database_test.go b/collector/pg_database_test.go new file mode 100644 index 000000000..982e4a3ea --- /dev/null +++ b/collector/pg_database_test.go @@ -0,0 +1,47 @@ +package collector + +import ( + "context" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/smartystreets/goconvey/convey" +) + +func TestPGDatabaseCollector(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("Error opening a stub db connection: %s", err) + } + defer db.Close() + + mock.ExpectQuery(sanitizeQuery(pgDatabaseQuery)).WillReturnRows(sqlmock.NewRows([]string{"datname"}). + AddRow("postgres")) + + mock.ExpectQuery(sanitizeQuery(pgDatabaseSizeQuery)).WithArgs("postgres").WillReturnRows(sqlmock.NewRows([]string{"pg_database_size"}). 
+ AddRow(1024)) + + ch := make(chan prometheus.Metric) + go func() { + defer close(ch) + c := PGDatabaseCollector{} + if err := c.Update(context.Background(), db, ch); err != nil { + t.Errorf("Error calling PGDatabaseCollector.Update: %s", err) + } + }() + + expected := []MetricResult{ + {labels: labelMap{"datname": "postgres"}, value: 1024, metricType: dto.MetricType_GAUGE}, + } + convey.Convey("Metrics comparison", t, func() { + for _, expect := range expected { + m := readMetric(<-ch) + convey.So(expect, convey.ShouldResemble, m) + } + }) + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled exceptions: %s", err) + } +} diff --git a/go.mod b/go.mod index 4d6214c7a..aeeec3d0e 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/prometheus-community/postgres_exporter go 1.19 require ( + github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/alecthomas/kingpin/v2 v2.3.2 github.com/blang/semver/v4 v4.0.0 github.com/go-kit/log v0.2.1 @@ -23,13 +24,17 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/golang/protobuf v1.5.2 // indirect + github.com/gopherjs/gopherjs v1.17.2 // indirect github.com/jpillora/backoff v1.0.0 // indirect + github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect + github.com/smartystreets/assertions v1.13.1 // indirect + github.com/smartystreets/goconvey v1.8.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect golang.org/x/crypto v0.7.0 // indirect golang.org/x/net v0.8.0 // indirect diff --git a/go.sum b/go.sum index 4669d9acc..a9758d5fd 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ 
+github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= @@ -26,8 +28,12 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -56,6 +62,10 @@ github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJf github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 
+github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU= +github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY= +github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w= +github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= From cd84f17b9503d9448fc7a08299a7efcb48f67f69 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 14:08:22 -0700 Subject: [PATCH 14/30] Add test for postmaster Signed-off-by: Felix Yuan --- collector/pg_postmaster.go | 6 ++--- collector/pg_postmaster_test.go | 45 +++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 3 deletions(-) create mode 100644 collector/pg_postmaster_test.go diff --git a/collector/pg_postmaster.go b/collector/pg_postmaster.go index db57837f6..28dbba7d4 100644 --- a/collector/pg_postmaster.go +++ b/collector/pg_postmaster.go @@ -37,11 +37,11 @@ var pgPostMasterStartTimeSeconds = prometheus.NewDesc( []string{}, nil, ) +var pgPostmasterQuery = "SELECT pg_postmaster_start_time from pg_postmaster_start_time();" + func (c *PGPostmasterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { row := db.QueryRowContext(ctx, - `SELECT - pg_postmaster_start_time - from pg_postmaster_start_time();`) + pgPostmasterQuery) var startTimeSeconds float64 err := row.Scan(&startTimeSeconds) diff --git a/collector/pg_postmaster_test.go b/collector/pg_postmaster_test.go new file mode 100644 index 000000000..4fde08146 --- /dev/null +++ b/collector/pg_postmaster_test.go @@ -0,0 +1,45 @@ +package collector + +import ( + "context" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + 
"github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/smartystreets/goconvey/convey" +) + +func TestPgPostmasterCollector(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("Error opening a stub db connection: %s", err) + } + defer db.Close() + + mock.ExpectQuery(sanitizeQuery(pgPostmasterQuery)).WillReturnRows(sqlmock.NewRows([]string{"pg_postmaster_start_time"}). + AddRow(1685739904)) + + ch := make(chan prometheus.Metric) + go func() { + defer close(ch) + c := PGPostmasterCollector{} + + if err := c.Update(context.Background(), db, ch); err != nil { + t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) + } + }() + + expected := []MetricResult{ + {labels: labelMap{}, value: 1685739904, metricType: dto.MetricType_GAUGE}, + } + convey.Convey("Metrics comparison", t, func() { + for _, expect := range expected { + m := readMetric(<-ch) + convey.So(expect, convey.ShouldResemble, m) + } + }) + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled exceptions: %s", err) + } +} From b588ec9715e500a0272c6739c74e2f9d5287d6be Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 15:11:23 -0700 Subject: [PATCH 15/30] Add replication slot test Signed-off-by: Felix Yuan --- collector/replication_slots.go | 29 +++++++--- collector/replication_slots_test.go | 89 +++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+), 9 deletions(-) create mode 100644 collector/replication_slots_test.go diff --git a/collector/replication_slots.go b/collector/replication_slots.go index d3e91755d..8a3562277 100644 --- a/collector/replication_slots.go +++ b/collector/replication_slots.go @@ -47,19 +47,22 @@ var pgReplicationSlotConfirmedFlushLSN = prometheus.NewDesc( var pgReplicationSlotActive = prometheus.NewDesc( "pg_replication_slot_is_active", - "last lsn confirmed flushed to the replication slot", + "whether the replication slot is active or not", 
[]string{"slot_name"}, nil, ) +var pgReplicationSlotQuery = `SELECT + slot_name, + pg_current_wal_lsn() - '0/0' AS current_wal_lsn, + coalesce(confirmed_flush_lsn, '0/0') - '0/0', + active +FROM + pg_replication_slots; +` + func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { rows, err := db.QueryContext(ctx, - `SELECT - slot_name, - pg_current_wal_lsn() - '0/0' AS current_wal_lsn, - coalesce(confirmed_flush_lsn, '0/0') - '0/0', - active - FROM - pg_replication_slots;`) + pgReplicationSlotQuery) if err != nil { return err } @@ -70,10 +73,18 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch cha var wal_lsn int64 var flush_lsn int64 var is_active bool + var is_active_value int + if err := rows.Scan(&slot_name, &wal_lsn, &flush_lsn, &is_active); err != nil { return err } + if is_active { + is_active_value = 1 + } else { + is_active_value = 0 + } + ch <- prometheus.MustNewConstMetric( pgReplicationSlotCurrentWalLSN, prometheus.GaugeValue, float64(wal_lsn), slot_name, @@ -86,7 +97,7 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch cha } ch <- prometheus.MustNewConstMetric( pgReplicationSlotActive, - prometheus.GaugeValue, float64(flush_lsn), slot_name, + prometheus.GaugeValue, float64(is_active_value), slot_name, ) } if err := rows.Err(); err != nil { diff --git a/collector/replication_slots_test.go b/collector/replication_slots_test.go new file mode 100644 index 000000000..17f99cd1c --- /dev/null +++ b/collector/replication_slots_test.go @@ -0,0 +1,89 @@ +package collector + +import ( + "context" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/smartystreets/goconvey/convey" +) + +func TestPgReplicationSlotCollectorActive(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("Error opening a stub db connection: %s", 
err) + } + defer db.Close() + + columns := []string{"slot_name", "current_wal_lsn", "confirmed_flush_lsn", "active"} + rows := sqlmock.NewRows(columns). + AddRow("test_slot", 5, 3, true) + mock.ExpectQuery(sanitizeQuery(pgReplicationSlotQuery)).WillReturnRows(rows) + + ch := make(chan prometheus.Metric) + go func() { + defer close(ch) + c := PGReplicationSlotCollector{} + + if err := c.Update(context.Background(), db, ch); err != nil { + t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) + } + }() + + expected := []MetricResult{ + {labels: labelMap{"slot_name": "test_slot"}, value: 5, metricType: dto.MetricType_GAUGE}, + {labels: labelMap{"slot_name": "test_slot"}, value: 3, metricType: dto.MetricType_GAUGE}, + {labels: labelMap{"slot_name": "test_slot"}, value: 1, metricType: dto.MetricType_GAUGE}, + } + + convey.Convey("Metrics comparison", t, func() { + for _, expect := range expected { + m := readMetric(<-ch) + convey.So(expect, convey.ShouldResemble, m) + } + }) + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled exceptions: %s", err) + } +} + +func TestPgReplicationSlotCollectorInActive(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("Error opening a stub db connection: %s", err) + } + defer db.Close() + + columns := []string{"slot_name", "current_wal_lsn", "confirmed_flush_lsn", "active"} + rows := sqlmock.NewRows(columns). 
+ AddRow("test_slot", 6, 12, false) + mock.ExpectQuery(sanitizeQuery(pgReplicationSlotQuery)).WillReturnRows(rows) + + ch := make(chan prometheus.Metric) + go func() { + defer close(ch) + c := PGReplicationSlotCollector{} + + if err := c.Update(context.Background(), db, ch); err != nil { + t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) + } + }() + + expected := []MetricResult{ + {labels: labelMap{"slot_name": "test_slot"}, value: 6, metricType: dto.MetricType_GAUGE}, + {labels: labelMap{"slot_name": "test_slot"}, value: 0, metricType: dto.MetricType_GAUGE}, + } + + convey.Convey("Metrics comparison", t, func() { + for _, expect := range expected { + m := readMetric(<-ch) + convey.So(expect, convey.ShouldResemble, m) + } + }) + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled exceptions: %s", err) + } + +} From 090b41ecfb21becf14db51232c9912696b1f9c91 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 15:27:16 -0700 Subject: [PATCH 16/30] Add pg_replication_test Signed-off-by: Felix Yuan --- collector/pg_replication.go | 21 ++++++++------ collector/pg_replication_test.go | 49 ++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+), 9 deletions(-) create mode 100644 collector/pg_replication_test.go diff --git a/collector/pg_replication.go b/collector/pg_replication.go index f736dbb1a..c4ce5aeda 100644 --- a/collector/pg_replication.go +++ b/collector/pg_replication.go @@ -43,17 +43,20 @@ var pgReplicationIsReplica = prometheus.NewDesc( []string{}, nil, ) +var pgReplicationQuery = `SELECT +CASE + WHEN NOT pg_is_in_recovery() THEN 0 + ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) +END AS lag, +CASE + WHEN pg_is_in_recovery() THEN 1 + ELSE 0 +END as is_replica` + func (c *PGReplicationCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { row := db.QueryRowContext(ctx, - `SELECT - CASE - WHEN NOT pg_is_in_recovery() THEN 0 - 
ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) - END AS lag, - CASE - WHEN pg_is_in_recovery() THEN 1 - ELSE 0 - END as is_replica`) + pgReplicationQuery, + ) var lag float64 var isReplica int64 diff --git a/collector/pg_replication_test.go b/collector/pg_replication_test.go new file mode 100644 index 000000000..0e32035ae --- /dev/null +++ b/collector/pg_replication_test.go @@ -0,0 +1,49 @@ +package collector + +import ( + "context" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/smartystreets/goconvey/convey" +) + +func TestPgReplicationCollector(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("Error opening a stub db connection: %s", err) + } + defer db.Close() + + ch := make(chan prometheus.Metric) + go func() { + defer close(ch) + c := PGReplicationCollector{} + + if err := c.Update(context.Background(), db, ch); err != nil { + t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) + } + }() + + columns := []string{"lag", "is_replica"} + rows := sqlmock.NewRows(columns). 
+ AddRow(1000, 1) + mock.ExpectQuery(sanitizeQuery(pgReplicationQuery)).WillReturnRows(rows) + + expected := []MetricResult{ + {labels: labelMap{}, value: 1000, metricType: dto.MetricType_GAUGE}, + {labels: labelMap{}, value: 1, metricType: dto.MetricType_GAUGE}, + } + + convey.Convey("Metrics comparison", t, func() { + for _, expect := range expected { + m := readMetric(<-ch) + convey.So(expect, convey.ShouldResemble, m) + } + }) + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled exceptions: %s", err) + } +} From b83c2a5bce369e1816b39dea1a0cd0aab6b2bb6d Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 15:29:30 -0700 Subject: [PATCH 17/30] Add license header to all files Signed-off-by: Felix Yuan --- collector/collector_test.go | 12 ++++++++++++ collector/pg_database_test.go | 12 ++++++++++++ collector/pg_postmaster_test.go | 12 ++++++++++++ collector/pg_replication_test.go | 12 ++++++++++++ collector/replication_slots_test.go | 12 ++++++++++++ 5 files changed, 60 insertions(+) diff --git a/collector/collector_test.go b/collector/collector_test.go index 8450920e1..061de8895 100644 --- a/collector/collector_test.go +++ b/collector/collector_test.go @@ -1,3 +1,15 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package collector import ( diff --git a/collector/pg_database_test.go b/collector/pg_database_test.go index 982e4a3ea..bb108bb86 100644 --- a/collector/pg_database_test.go +++ b/collector/pg_database_test.go @@ -1,3 +1,15 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package collector import ( diff --git a/collector/pg_postmaster_test.go b/collector/pg_postmaster_test.go index 4fde08146..9b93a5c91 100644 --- a/collector/pg_postmaster_test.go +++ b/collector/pg_postmaster_test.go @@ -1,3 +1,15 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package collector import ( diff --git a/collector/pg_replication_test.go b/collector/pg_replication_test.go index 0e32035ae..f998117e3 100644 --- a/collector/pg_replication_test.go +++ b/collector/pg_replication_test.go @@ -1,3 +1,15 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package collector import ( diff --git a/collector/replication_slots_test.go b/collector/replication_slots_test.go index 17f99cd1c..0e5891538 100644 --- a/collector/replication_slots_test.go +++ b/collector/replication_slots_test.go @@ -1,3 +1,15 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package collector import ( From 5043cfa8a6157f1db21957044f9324d61c08ffa9 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 16:39:27 -0700 Subject: [PATCH 18/30] Add pg_stat_bg_writer_test Signed-off-by: Felix Yuan --- collector/pg_stat_bgwriter.go | 28 ++++++----- collector/pg_stat_bgwriter_test.go | 76 ++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 13 deletions(-) create mode 100644 collector/pg_stat_bgwriter_test.go diff --git a/collector/pg_stat_bgwriter.go b/collector/pg_stat_bgwriter.go index 2b0d411c5..2bf5ac821 100644 --- a/collector/pg_stat_bgwriter.go +++ b/collector/pg_stat_bgwriter.go @@ -111,21 +111,23 @@ var statBGWriterStatsReset = prometheus.NewDesc( prometheus.Labels{}, ) +var statBGWriterQuery = `SELECT + checkpoints_timed + ,checkpoints_req + ,checkpoint_write_time + ,checkpoint_sync_time + ,buffers_checkpoint + ,buffers_clean + ,maxwritten_clean + ,buffers_backend + ,buffers_backend_fsync + ,buffers_alloc + ,stats_reset +FROM pg_stat_bgwriter;` + func (PGStatBGWriterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { row := db.QueryRowContext(ctx, - `SELECT - checkpoints_timed - ,checkpoints_req - ,checkpoint_write_time - ,checkpoint_sync_time - ,buffers_checkpoint - ,buffers_clean - ,maxwritten_clean - ,buffers_backend - ,buffers_backend_fsync - ,buffers_alloc - ,stats_reset - FROM pg_stat_bgwriter;`) + statBGWriterQuery) var cpt int var cpr int diff --git a/collector/pg_stat_bgwriter_test.go b/collector/pg_stat_bgwriter_test.go new file mode 100644 index 000000000..2c9574812 --- /dev/null +++ b/collector/pg_stat_bgwriter_test.go @@ -0,0 +1,76 @@ +package collector + +import ( + "context" + "testing" + "time" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/smartystreets/goconvey/convey" +) + +func TestPGStatBGWriterCollector(t *testing.T) { + db, mock, err := sqlmock.New() + if 
err != nil { + t.Fatalf("Error opening a stub db connection: %s", err) + } + defer db.Close() + + columns := []string{ + "checkpoints_timed", + "checkpoints_req", + "checkpoint_write_time", + "checkpoint_sync_time", + "buffers_checkpoint", + "buffers_clean", + "maxwritten_clean", + "buffers_backend", + "buffers_backend_fsync", + "buffers_alloc", + "stats_reset"} + + srT, err := time.Parse("2006-01-02 15:04:05.00000-07", "2023-05-25 17:10:42.81132-07") + if err != nil { + t.Fatalf("Error parsing time: %s", err) + } + + rows := sqlmock.NewRows(columns). + AddRow(354, 4945, 289097744, 1242257, 3275602074, 89320867, 450139, 2034563757, 0, 2725688749, srT) + mock.ExpectQuery(sanitizeQuery(statBGWriterQuery)).WillReturnRows(rows) + + ch := make(chan prometheus.Metric) + go func() { + defer close(ch) + c := PGStatBGWriterCollector{} + + if err := c.Update(context.Background(), db, ch); err != nil { + t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) + } + }() + + expected := []MetricResult{ + {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 354}, + {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 4945}, + {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 289097744}, + {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 1242257}, + {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 3275602074}, + {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 89320867}, + {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 450139}, + {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 2034563757}, + {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0}, + {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 2725688749}, + {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 1685059842}, + } + + convey.Convey("Metrics comparison", t, func() { + for _, expect := range expected { + m := readMetric(<-ch) + convey.So(expect, 
convey.ShouldResemble, m) + } + }) + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled exceptions: %s", err) + } +} From c64752879e0d7b4568fb493075e96b2c2878a6f3 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 16:48:48 -0700 Subject: [PATCH 19/30] Add pg_stat_statements test Signed-off-by: Felix Yuan --- collector/pg_stat_statements.go | 42 +++++++++++----------- collector/pg_stat_statements_test.go | 52 ++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+), 20 deletions(-) create mode 100644 collector/pg_stat_statements_test.go diff --git a/collector/pg_stat_statements.go b/collector/pg_stat_statements.go index 8b4dff5c3..81869d176 100644 --- a/collector/pg_stat_statements.go +++ b/collector/pg_stat_statements.go @@ -73,28 +73,30 @@ var statStatementsBlockWriteSecondsTotal = prometheus.NewDesc( prometheus.Labels{}, ) +var pgStatStatementsQuery = `SELECT +pg_get_userbyid(userid) as user, +pg_database.datname, +pg_stat_statements.queryid, +pg_stat_statements.calls as calls_total, +pg_stat_statements.total_time / 1000.0 as seconds_total, +pg_stat_statements.rows as rows_total, +pg_stat_statements.blk_read_time / 1000.0 as block_read_seconds_total, +pg_stat_statements.blk_write_time / 1000.0 as block_write_seconds_total +FROM pg_stat_statements +JOIN pg_database + ON pg_database.oid = pg_stat_statements.dbid +WHERE + total_time > ( + SELECT percentile_cont(0.1) + WITHIN GROUP (ORDER BY total_time) + FROM pg_stat_statements + ) +ORDER BY seconds_total DESC +LIMIT 100;` + func (PGStatStatementsCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { rows, err := db.QueryContext(ctx, - `SELECT - pg_get_userbyid(userid) as user, - pg_database.datname, - pg_stat_statements.queryid, - pg_stat_statements.calls as calls_total, - pg_stat_statements.total_time / 1000.0 as seconds_total, - pg_stat_statements.rows as rows_total, - pg_stat_statements.blk_read_time / 1000.0 as 
block_read_seconds_total, - pg_stat_statements.blk_write_time / 1000.0 as block_write_seconds_total - FROM pg_stat_statements - JOIN pg_database - ON pg_database.oid = pg_stat_statements.dbid - WHERE - total_time > ( - SELECT percentile_cont(0.1) - WITHIN GROUP (ORDER BY total_time) - FROM pg_stat_statements - ) - ORDER BY seconds_total DESC - LIMIT 100`) + pgStatStatementsQuery) if err != nil { return err diff --git a/collector/pg_stat_statements_test.go b/collector/pg_stat_statements_test.go new file mode 100644 index 000000000..000f90f34 --- /dev/null +++ b/collector/pg_stat_statements_test.go @@ -0,0 +1,52 @@ +package collector + +import ( + "context" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/smartystreets/goconvey/convey" +) + +func TestPgStateStatementsCollector(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("Error opening a stub db connection: %s", err) + } + defer db.Close() + + columns := []string{"user", "datname", "queryid", "calls_total", "seconds_total", "rows_total", "block_read_seconds_total", "block_write_seconds_total"} + rows := sqlmock.NewRows(columns). 
+ AddRow("postgres", "postgres", 1500, 5, 0.4, 100, 0.1, 0.2) + mock.ExpectQuery(sanitizeQuery(pgStatStatementsQuery)).WillReturnRows(rows) + + ch := make(chan prometheus.Metric) + go func() { + defer close(ch) + c := PGStatStatementsCollector{} + + if err := c.Update(context.Background(), db, ch); err != nil { + t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) + } + }() + + expected := []MetricResult{ + {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 5}, + {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.4}, + {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 100}, + {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.1}, + {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.2}, + } + + convey.Convey("Metrics comparison", t, func() { + for _, expect := range expected { + m := readMetric(<-ch) + convey.So(expect, convey.ShouldResemble, m) + } + }) + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled exceptions: %s", err) + } +} From b6c6f77b8003c1146782924462e90232a8b31e05 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 17:11:15 -0700 Subject: [PATCH 20/30] Add test for stat_user_tables Signed-off-by: Felix Yuan --- collector/pg_replication_test.go | 2 +- collector/pg_stat_bgwriter_test.go | 2 +- collector/pg_stat_statements_test.go | 4 +- collector/pg_stat_user_tables.go | 52 ++++++----- collector/pg_stat_user_tables_test.go | 126 ++++++++++++++++++++++++++ collector/replication_slots_test.go | 2 +- 6 files changed, 158 insertions(+), 30 deletions(-) create mode 100644 collector/pg_stat_user_tables_test.go diff --git 
a/collector/pg_replication_test.go b/collector/pg_replication_test.go index f998117e3..5b55a12c1 100644 --- a/collector/pg_replication_test.go +++ b/collector/pg_replication_test.go @@ -35,7 +35,7 @@ func TestPgReplicationCollector(t *testing.T) { c := PGReplicationCollector{} if err := c.Update(context.Background(), db, ch); err != nil { - t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) + t.Errorf("Error calling PGReplicationCollector.Update: %s", err) } }() diff --git a/collector/pg_stat_bgwriter_test.go b/collector/pg_stat_bgwriter_test.go index 2c9574812..c945b4d30 100644 --- a/collector/pg_stat_bgwriter_test.go +++ b/collector/pg_stat_bgwriter_test.go @@ -46,7 +46,7 @@ func TestPGStatBGWriterCollector(t *testing.T) { c := PGStatBGWriterCollector{} if err := c.Update(context.Background(), db, ch); err != nil { - t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) + t.Errorf("Error calling PGStatBGWriterCollector.Update: %s", err) } }() diff --git a/collector/pg_stat_statements_test.go b/collector/pg_stat_statements_test.go index 000f90f34..79a7b5f9a 100644 --- a/collector/pg_stat_statements_test.go +++ b/collector/pg_stat_statements_test.go @@ -10,7 +10,7 @@ import ( "github.com/smartystreets/goconvey/convey" ) -func TestPgStateStatementsCollector(t *testing.T) { +func TestPGStateStatementsCollector(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("Error opening a stub db connection: %s", err) @@ -28,7 +28,7 @@ func TestPgStateStatementsCollector(t *testing.T) { c := PGStatStatementsCollector{} if err := c.Update(context.Background(), db, ch); err != nil { - t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) + t.Errorf("Error calling PGStatStatementsCollector.Update: %s", err) } }() diff --git a/collector/pg_stat_user_tables.go b/collector/pg_stat_user_tables.go index 00c0ed2de..c44d7c502 100644 --- a/collector/pg_stat_user_tables.go +++ b/collector/pg_stat_user_tables.go @@ -169,33 +169,35 @@ var 
statUserTablesAutoanalyzeCount = prometheus.NewDesc( prometheus.Labels{}, ) +var statUserTablesQuery = `SELECT + current_database() datname, + schemaname, + relname, + seq_scan, + seq_tup_read, + idx_scan, + idx_tup_fetch, + n_tup_ins, + n_tup_upd, + n_tup_del, + n_tup_hot_upd, + n_live_tup, + n_dead_tup, + n_mod_since_analyze, + COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum, + COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum, + COALESCE(last_analyze, '1970-01-01Z') as last_analyze, + COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze, + vacuum_count, + autovacuum_count, + analyze_count, + autoanalyze_count +FROM + pg_stat_user_tables` + func (c *PGStatUserTablesCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { rows, err := db.QueryContext(ctx, - `SELECT - current_database() datname, - schemaname, - relname, - seq_scan, - seq_tup_read, - idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_live_tup, - n_dead_tup, - n_mod_since_analyze, - COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum, - COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum, - COALESCE(last_analyze, '1970-01-01Z') as last_analyze, - COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze, - vacuum_count, - autovacuum_count, - analyze_count, - autoanalyze_count - FROM - pg_stat_user_tables`) + statUserTablesQuery) if err != nil { return err diff --git a/collector/pg_stat_user_tables_test.go b/collector/pg_stat_user_tables_test.go new file mode 100644 index 000000000..83018a41d --- /dev/null +++ b/collector/pg_stat_user_tables_test.go @@ -0,0 +1,126 @@ +package collector + +import ( + "context" + "testing" + "time" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/smartystreets/goconvey/convey" +) + +func TestPGStatUserTablesCollector(t *testing.T) { + db, mock, err := sqlmock.New() + if 
err != nil { + t.Fatalf("Error opening a stub db connection: %s", err) + } + defer db.Close() + + lastVacuumTime, err := time.Parse("2006-01-02Z", "2023-06-02Z") + if err != nil { + t.Fatalf("Error parsing vacuum time: %s", err) + } + lastAutoVacuumTime, err := time.Parse("2006-01-02Z", "2023-06-03Z") + if err != nil { + t.Fatalf("Error parsing vacuum time: %s", err) + } + lastAnalyzeTime, err := time.Parse("2006-01-02Z", "2023-06-04Z") + if err != nil { + t.Fatalf("Error parsing vacuum time: %s", err) + } + lastAutoAnalyzeTime, err := time.Parse("2006-01-02Z", "2023-06-05Z") + if err != nil { + t.Fatalf("Error parsing vacuum time: %s", err) + } + + columns := []string{ + "datname", + "schemaname", + "relname", + "seq_scan", + "seq_tup_read", + "idx_scan", + "idx_tup_fetch", + "n_tup_ins", + "n_tup_upd", + "n_tup_del", + "n_tup_hot_upd", + "n_live_tup", + "n_dead_tup", + "n_mod_since_analyze", + "last_vacuum", + "last_autovacuum", + "last_analyze", + "last_autoanalyze", + "vacuum_count", + "autovacuum_count", + "analyze_count", + "autoanalyze_count"} + rows := sqlmock.NewRows(columns). 
+ AddRow("postgres", + "public", + "a_table", + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 0, + lastVacuumTime, + lastAutoVacuumTime, + lastAnalyzeTime, + lastAutoAnalyzeTime, + 11, + 12, + 13, + 14) + mock.ExpectQuery(sanitizeQuery(statUserTablesQuery)).WillReturnRows(rows) + ch := make(chan prometheus.Metric) + go func() { + defer close(ch) + c := PGStatUserTablesCollector{} + + if err := c.Update(context.Background(), db, ch); err != nil { + t.Errorf("Error calling PGStatUserTablesCollector.Update: %s", err) + } + }() + + expected := []MetricResult{ + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 1}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 2}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 3}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 4}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 5}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 6}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 7}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 8}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 9}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 10}, + {labels: labelMap{"datname": "postgres", "schemaname": 
"public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 0}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 1685664000}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 1685750400}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 1685836800}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 1685923200}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 11}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 12}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 13}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 14}, + } + + convey.Convey("Metrics comparison", t, func() { + for _, expect := range expected { + m := readMetric(<-ch) + convey.So(expect, convey.ShouldResemble, m) + } + }) + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled exceptions: %s", err) + } +} diff --git a/collector/replication_slots_test.go b/collector/replication_slots_test.go index 0e5891538..53bafafad 100644 --- a/collector/replication_slots_test.go +++ b/collector/replication_slots_test.go @@ -79,7 +79,7 @@ func TestPgReplicationSlotCollectorInActive(t *testing.T) { c := PGReplicationSlotCollector{} if err := c.Update(context.Background(), db, ch); err != nil { - t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) + t.Errorf("Error calling 
PGReplicationSlotCollector.Update: %s", err) } }() From d6b2bfa51578953106fad661298e9beea1fc0ed4 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 17:18:41 -0700 Subject: [PATCH 21/30] Add statio_user_tables_test Signed-off-by: Felix Yuan --- collector/pg_statio_user_tables.go | 28 ++++----- collector/pg_statio_user_tables_test.go | 76 +++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 13 deletions(-) create mode 100644 collector/pg_statio_user_tables_test.go diff --git a/collector/pg_statio_user_tables.go b/collector/pg_statio_user_tables.go index 9ab3d8d93..71bdd9c94 100644 --- a/collector/pg_statio_user_tables.go +++ b/collector/pg_statio_user_tables.go @@ -91,21 +91,23 @@ var statioUserTablesTidxBlksHit = prometheus.NewDesc( prometheus.Labels{}, ) +var statioUserTablesQuery = `SELECT + current_database() datname, + schemaname, + relname, + heap_blks_read, + heap_blks_hit, + idx_blks_read, + idx_blks_hit, + toast_blks_read, + toast_blks_hit, + tidx_blks_read, + tidx_blks_hit +FROM pg_statio_user_tables` + func (PGStatIOUserTablesCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { rows, err := db.QueryContext(ctx, - `SELECT - current_database() datname, - schemaname, - relname, - heap_blks_read, - heap_blks_hit, - idx_blks_read, - idx_blks_hit, - toast_blks_read, - toast_blks_hit, - tidx_blks_read, - tidx_blks_hit - FROM pg_statio_user_tables`) + statioUserTablesQuery) if err != nil { return err diff --git a/collector/pg_statio_user_tables_test.go b/collector/pg_statio_user_tables_test.go new file mode 100644 index 000000000..ce87e7a1b --- /dev/null +++ b/collector/pg_statio_user_tables_test.go @@ -0,0 +1,76 @@ +package collector + +import ( + "context" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/smartystreets/goconvey/convey" +) + +func TestPGStatIOUserTablesCollector(t *testing.T) { + db, 
mock, err := sqlmock.New() + if err != nil { + t.Fatalf("Error opening a stub db connection: %s", err) + } + defer db.Close() + + columns := []string{ + "datname", + "schemaname", + "relname", + "heap_blks_read", + "heap_blks_hit", + "idx_blks_read", + "idx_blks_hit", + "toast_blks_read", + "toast_blks_hit", + "tidx_blks_read", + "tidx_blks_hit", + } + rows := sqlmock.NewRows(columns). + AddRow("postgres", + "public", + "a_table", + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8) + mock.ExpectQuery(sanitizeQuery(statioUserTablesQuery)).WillReturnRows(rows) + ch := make(chan prometheus.Metric) + go func() { + defer close(ch) + c := PGStatIOUserTablesCollector{} + + if err := c.Update(context.Background(), db, ch); err != nil { + t.Errorf("Error calling PGStatIOUserTablesCollector.Update: %s", err) + } + }() + + expected := []MetricResult{ + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 1}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 2}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 3}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 4}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 5}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 6}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 7}, + {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 8}, + } + + convey.Convey("Metrics comparison", t, func() { + for 
_, expect := range expected { + m := readMetric(<-ch) + convey.So(expect, convey.ShouldResemble, m) + } + }) + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled exceptions: %s", err) + } +} From a3175032b8fa2be62bd638797cc5895031c47e5d Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 17:25:15 -0700 Subject: [PATCH 22/30] Add license Signed-off-by: Felix Yuan --- collector/pg_stat_bgwriter_test.go | 12 ++++++++++++ collector/pg_stat_statements_test.go | 12 ++++++++++++ collector/pg_stat_user_tables_test.go | 12 ++++++++++++ collector/pg_statio_user_tables_test.go | 12 ++++++++++++ 4 files changed, 48 insertions(+) diff --git a/collector/pg_stat_bgwriter_test.go b/collector/pg_stat_bgwriter_test.go index c945b4d30..54f625c9e 100644 --- a/collector/pg_stat_bgwriter_test.go +++ b/collector/pg_stat_bgwriter_test.go @@ -1,3 +1,15 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package collector import ( diff --git a/collector/pg_stat_statements_test.go b/collector/pg_stat_statements_test.go index 79a7b5f9a..a5c5cab57 100644 --- a/collector/pg_stat_statements_test.go +++ b/collector/pg_stat_statements_test.go @@ -1,3 +1,15 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package collector import ( diff --git a/collector/pg_stat_user_tables_test.go b/collector/pg_stat_user_tables_test.go index 83018a41d..29b5d15f1 100644 --- a/collector/pg_stat_user_tables_test.go +++ b/collector/pg_stat_user_tables_test.go @@ -1,3 +1,15 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package collector import ( diff --git a/collector/pg_statio_user_tables_test.go b/collector/pg_statio_user_tables_test.go index ce87e7a1b..0a7174d80 100644 --- a/collector/pg_statio_user_tables_test.go +++ b/collector/pg_statio_user_tables_test.go @@ -1,3 +1,15 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package collector import ( From 55302d6bcf3f12acd4e9b5d8a7b156a4f4045023 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 17:26:39 -0700 Subject: [PATCH 23/30] Fix up go mod Signed-off-by: Felix Yuan --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index aeeec3d0e..491e69e4c 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/prometheus/client_model v0.3.0 github.com/prometheus/common v0.42.0 github.com/prometheus/exporter-toolkit v0.9.1 + github.com/smartystreets/goconvey v1.8.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -34,7 +35,6 @@ require ( github.com/prometheus/procfs v0.9.0 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/smartystreets/assertions v1.13.1 // indirect - github.com/smartystreets/goconvey v1.8.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect golang.org/x/crypto v0.7.0 // indirect golang.org/x/net v0.8.0 // indirect From 09ce114d9ae7ce952caaa534eb7833c4b5ae1e7a Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 17:39:27 -0700 Subject: [PATCH 24/30] Remove replication test to see if it fixes tests Signed-off-by: Felix Yuan --- collector/pg_replication_test.go | 61 -------------------------------- 1 file changed, 61 deletions(-) delete mode 100644 collector/pg_replication_test.go diff --git a/collector/pg_replication_test.go b/collector/pg_replication_test.go deleted file mode 100644 index 5b55a12c1..000000000 --- 
a/collector/pg_replication_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package collector - -import ( - "context" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPgReplicationCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGReplicationCollector{} - - if err := c.Update(context.Background(), db, ch); err != nil { - t.Errorf("Error calling PGReplicationCollector.Update: %s", err) - } - }() - - columns := []string{"lag", "is_replica"} - rows := sqlmock.NewRows(columns). 
- AddRow(1000, 1) - mock.ExpectQuery(sanitizeQuery(pgReplicationQuery)).WillReturnRows(rows) - - expected := []MetricResult{ - {labels: labelMap{}, value: 1000, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{}, value: 1, metricType: dto.MetricType_GAUGE}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} From add68f5be591043595ad64c5398c448d1a60411d Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 17:42:16 -0700 Subject: [PATCH 25/30] Revert "Remove replication test to see if it fixes tests" This reverts commit 09ce114d9ae7ce952caaa534eb7833c4b5ae1e7a. Signed-off-by: Felix Yuan --- collector/pg_replication_test.go | 61 ++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 collector/pg_replication_test.go diff --git a/collector/pg_replication_test.go b/collector/pg_replication_test.go new file mode 100644 index 000000000..5b55a12c1 --- /dev/null +++ b/collector/pg_replication_test.go @@ -0,0 +1,61 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package collector + +import ( + "context" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/smartystreets/goconvey/convey" +) + +func TestPgReplicationCollector(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("Error opening a stub db connection: %s", err) + } + defer db.Close() + + ch := make(chan prometheus.Metric) + go func() { + defer close(ch) + c := PGReplicationCollector{} + + if err := c.Update(context.Background(), db, ch); err != nil { + t.Errorf("Error calling PGReplicationCollector.Update: %s", err) + } + }() + + columns := []string{"lag", "is_replica"} + rows := sqlmock.NewRows(columns). + AddRow(1000, 1) + mock.ExpectQuery(sanitizeQuery(pgReplicationQuery)).WillReturnRows(rows) + + expected := []MetricResult{ + {labels: labelMap{}, value: 1000, metricType: dto.MetricType_GAUGE}, + {labels: labelMap{}, value: 1, metricType: dto.MetricType_GAUGE}, + } + + convey.Convey("Metrics comparison", t, func() { + for _, expect := range expected { + m := readMetric(<-ch) + convey.So(expect, convey.ShouldResemble, m) + } + }) + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled exceptions: %s", err) + } +} From 438140d139010a810850ac6f3a99d4fd642907d0 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Fri, 2 Jun 2023 17:43:39 -0700 Subject: [PATCH 26/30] Move mock before calling query Signed-off-by: Felix Yuan --- collector/pg_replication_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/collector/pg_replication_test.go b/collector/pg_replication_test.go index 5b55a12c1..4d240cdf3 100644 --- a/collector/pg_replication_test.go +++ b/collector/pg_replication_test.go @@ -29,6 +29,11 @@ func TestPgReplicationCollector(t *testing.T) { } defer db.Close() + columns := []string{"lag", "is_replica"} + rows := sqlmock.NewRows(columns). 
+ AddRow(1000, 1) + mock.ExpectQuery(sanitizeQuery(pgReplicationQuery)).WillReturnRows(rows) + ch := make(chan prometheus.Metric) go func() { defer close(ch) @@ -39,11 +44,6 @@ func TestPgReplicationCollector(t *testing.T) { } }() - columns := []string{"lag", "is_replica"} - rows := sqlmock.NewRows(columns). - AddRow(1000, 1) - mock.ExpectQuery(sanitizeQuery(pgReplicationQuery)).WillReturnRows(rows) - expected := []MetricResult{ {labels: labelMap{}, value: 1000, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 1, metricType: dto.MetricType_GAUGE}, From 2784315ba6eb8ae98f24e4995dd68f6a445b8d31 Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Mon, 5 Jun 2023 10:20:06 -0700 Subject: [PATCH 27/30] Fix var names Signed-off-by: Felix Yuan --- collector/pg_database.go | 18 ++-- collector/pg_stat_bgwriter.go | 182 +++++++++++++++++---------------- collector/replication_slots.go | 79 +++++++------- 3 files changed, 141 insertions(+), 138 deletions(-) diff --git a/collector/pg_database.go b/collector/pg_database.go index d6fc5bda1..8a027e2db 100644 --- a/collector/pg_database.go +++ b/collector/pg_database.go @@ -41,14 +41,16 @@ func NewPGDatabaseCollector(config collectorConfig) (Collector, error) { }, nil } -var pgDatabaseSizeBytes = prometheus.NewDesc( - "pg_database_size_bytes", - "Disk space used by the database", - []string{"datname"}, nil, -) +var ( + pgDatabaseSizeDesc = prometheus.NewDesc( + "pg_database_size_bytes", + "Disk space used by the database", + []string{"datname"}, nil, + ) -var pgDatabaseQuery = "SELECT pg_database.datname FROM pg_database;" -var pgDatabaseSizeQuery = "SELECT pg_database_size($1)" + pgDatabaseQuery = "SELECT pg_database.datname FROM pg_database;" + pgDatabaseSizeQuery = "SELECT pg_database_size($1)" +) // Update implements Collector and exposes database size. // It is called by the Prometheus registry when collecting metrics. 
@@ -95,7 +97,7 @@ func (c PGDatabaseCollector) Update(ctx context.Context, db *sql.DB, ch chan<- p } ch <- prometheus.MustNewConstMetric( - pgDatabaseSizeBytes, + pgDatabaseSizeDesc, prometheus.GaugeValue, float64(size), datname, ) } diff --git a/collector/pg_stat_bgwriter.go b/collector/pg_stat_bgwriter.go index 2bf5ac821..98a5b5efd 100644 --- a/collector/pg_stat_bgwriter.go +++ b/collector/pg_stat_bgwriter.go @@ -34,96 +34,98 @@ func NewPGStatBGWriterCollector(collectorConfig) (Collector, error) { const bgWriterSubsystem = "stat_bgwriter" -var statBGWriterCheckpointsTimed = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_timed_total"), - "Number of scheduled checkpoints that have been performed", - []string{}, - prometheus.Labels{}, -) +var ( + statBGWriterCheckpointsTimedDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_timed_total"), + "Number of scheduled checkpoints that have been performed", + []string{}, + prometheus.Labels{}, + ) -var statBGWriterCheckpointsReq = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_req_total"), - "Number of requested checkpoints that have been performed", - []string{}, - prometheus.Labels{}, -) + statBGWriterCheckpointsReqDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_req_total"), + "Number of requested checkpoints that have been performed", + []string{}, + prometheus.Labels{}, + ) -var statBGWriterCheckpointWriteTime = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_write_time_total"), - "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds", - []string{}, - prometheus.Labels{}, -) + statBGWriterCheckpointsReqTimeDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_write_time_total"), + "Total amount of 
time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds", + []string{}, + prometheus.Labels{}, + ) -var statBGWriterCheckpointSyncTime = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_sync_time_total"), - "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds", - []string{}, - prometheus.Labels{}, -) + statBGWriterCheckpointsSyncTimeDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_sync_time_total"), + "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds", + []string{}, + prometheus.Labels{}, + ) -var statBGWriterBuffersCheckpoint = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_checkpoint_total"), - "Number of buffers written during checkpoints", - []string{}, - prometheus.Labels{}, -) + statBGWriterBuffersCheckpointDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_checkpoint_total"), + "Number of buffers written during checkpoints", + []string{}, + prometheus.Labels{}, + ) -var statBGWriterBuffersClean = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_clean_total"), - "Number of buffers written by the background writer", - []string{}, - prometheus.Labels{}, -) + statBGWriterBuffersCleanDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_clean_total"), + "Number of buffers written by the background writer", + []string{}, + prometheus.Labels{}, + ) -var statBGWriterMaxWrittenClean = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "maxwritten_clean_total"), - "Number of times the background writer stopped a cleaning scan because it had written too many buffers", - []string{}, - 
prometheus.Labels{}, -) + statBGWriterMaxwrittenCleanDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "maxwritten_clean_total"), + "Number of times the background writer stopped a cleaning scan because it had written too many buffers", + []string{}, + prometheus.Labels{}, + ) -var statBGWriterBuffersBackend = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_total"), - "Number of buffers written directly by a backend", - []string{}, - prometheus.Labels{}, -) + statBGWriterBuffersBackendDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_total"), + "Number of buffers written directly by a backend", + []string{}, + prometheus.Labels{}, + ) -var statBGWriterBuffersBackendFsync = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_fsync_total"), - "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)", - []string{}, - prometheus.Labels{}, -) + statBGWriterBuffersBackendFsyncDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_fsync_total"), + "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)", + []string{}, + prometheus.Labels{}, + ) -var statBGWriterBuffersAlloc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_alloc_total"), - "Number of buffers allocated", - []string{}, - prometheus.Labels{}, -) + statBGWriterBuffersAllocDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_alloc_total"), + "Number of buffers allocated", + []string{}, + prometheus.Labels{}, + ) -var statBGWriterStatsReset = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "stats_reset_total"), - "Time at 
which these statistics were last reset", - []string{}, - prometheus.Labels{}, -) + statBGWriterStatsResetDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "stats_reset_total"), + "Time at which these statistics were last reset", + []string{}, + prometheus.Labels{}, + ) -var statBGWriterQuery = `SELECT - checkpoints_timed - ,checkpoints_req - ,checkpoint_write_time - ,checkpoint_sync_time - ,buffers_checkpoint - ,buffers_clean - ,maxwritten_clean - ,buffers_backend - ,buffers_backend_fsync - ,buffers_alloc - ,stats_reset -FROM pg_stat_bgwriter;` + statBGWriterQuery = `SELECT + checkpoints_timed + ,checkpoints_req + ,checkpoint_write_time + ,checkpoint_sync_time + ,buffers_checkpoint + ,buffers_clean + ,maxwritten_clean + ,buffers_backend + ,buffers_backend_fsync + ,buffers_alloc + ,stats_reset + FROM pg_stat_bgwriter;` +) func (PGStatBGWriterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { row := db.QueryRowContext(ctx, @@ -147,57 +149,57 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- } ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointsTimed, + statBGWriterCheckpointsTimedDesc, prometheus.CounterValue, float64(cpt), ) ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointsReq, + statBGWriterCheckpointsReqDesc, prometheus.CounterValue, float64(cpr), ) ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointWriteTime, + statBGWriterCheckpointsReqTimeDesc, prometheus.CounterValue, float64(cpwt), ) ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointSyncTime, + statBGWriterCheckpointsSyncTimeDesc, prometheus.CounterValue, float64(cpst), ) ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersCheckpoint, + statBGWriterBuffersCheckpointDesc, prometheus.CounterValue, float64(bcp), ) ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersClean, + statBGWriterBuffersCleanDesc, prometheus.CounterValue, float64(bc), ) ch <- 
prometheus.MustNewConstMetric( - statBGWriterMaxWrittenClean, + statBGWriterMaxwrittenCleanDesc, prometheus.CounterValue, float64(mwc), ) ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersBackend, + statBGWriterBuffersBackendDesc, prometheus.CounterValue, float64(bb), ) ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersBackendFsync, + statBGWriterBuffersBackendFsyncDesc, prometheus.CounterValue, float64(bbf), ) ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersAlloc, + statBGWriterBuffersAllocDesc, prometheus.CounterValue, float64(ba), ) ch <- prometheus.MustNewConstMetric( - statBGWriterStatsReset, + statBGWriterStatsResetDesc, prometheus.CounterValue, float64(sr.Unix()), ) diff --git a/collector/replication_slots.go b/collector/replication_slots.go index 8a3562277..95690f0bc 100644 --- a/collector/replication_slots.go +++ b/collector/replication_slots.go @@ -33,32 +33,33 @@ func NewPGReplicationSlotCollector(config collectorConfig) (Collector, error) { return &PGReplicationSlotCollector{log: config.logger}, nil } -var pgReplicationSlotCurrentWalLSN = prometheus.NewDesc( - "pg_replication_slot_current_wal_lsn", - "current wal lsn value", - []string{"slot_name"}, nil, -) +var ( + pgReplicationSlotCurrentWalDesc = prometheus.NewDesc( + "pg_replication_slot_current_wal_lsn", + "current wal lsn value", + []string{"slot_name"}, nil, + ) -var pgReplicationSlotConfirmedFlushLSN = prometheus.NewDesc( - "pg_replication_slot_confirmed_flush_lsn", - "last lsn confirmed flushed to the replication slot", - []string{"slot_name"}, nil, -) + pgReplicationSlotCurrentFlushDesc = prometheus.NewDesc( + "pg_replication_slot_confirmed_flush_lsn", + "last lsn confirmed flushed to the replication slot", + []string{"slot_name"}, nil, + ) -var pgReplicationSlotActive = prometheus.NewDesc( - "pg_replication_slot_is_active", - "whether the replication slot is active or not", - []string{"slot_name"}, nil, -) + pgReplicationSlotIsActiveDesc = prometheus.NewDesc( + 
"pg_replication_slot_is_active", + "whether the replication slot is active or not", + []string{"slot_name"}, nil, + ) -var pgReplicationSlotQuery = `SELECT - slot_name, - pg_current_wal_lsn() - '0/0' AS current_wal_lsn, - coalesce(confirmed_flush_lsn, '0/0') - '0/0', - active -FROM - pg_replication_slots; -` + pgReplicationSlotQuery = `SELECT + slot_name, + pg_current_wal_lsn() - '0/0' AS current_wal_lsn, + coalesce(confirmed_flush_lsn, '0/0') - '0/0', + active + FROM + pg_replication_slots;` +) func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { rows, err := db.QueryContext(ctx, @@ -69,35 +70,33 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch cha defer rows.Close() for rows.Next() { - var slot_name string - var wal_lsn int64 - var flush_lsn int64 - var is_active bool - var is_active_value int + var slotName string + var walLsn int64 + var flushLsn int64 + var isActive bool - if err := rows.Scan(&slot_name, &wal_lsn, &flush_lsn, &is_active); err != nil { + if err := rows.Scan(&slotName, &walLsn, &flushLsn, &isActive); err != nil { return err } - if is_active { - is_active_value = 1 - } else { - is_active_value = 0 + isActiveValue := 0 + if isActive { + isActiveValue = 1 } ch <- prometheus.MustNewConstMetric( - pgReplicationSlotCurrentWalLSN, - prometheus.GaugeValue, float64(wal_lsn), slot_name, + pgReplicationSlotCurrentWalDesc, + prometheus.GaugeValue, float64(walLsn), slotName, ) - if is_active { + if isActive { ch <- prometheus.MustNewConstMetric( - pgReplicationSlotConfirmedFlushLSN, - prometheus.GaugeValue, float64(flush_lsn), slot_name, + pgReplicationSlotCurrentFlushDesc, + prometheus.GaugeValue, float64(flushLsn), slotName, ) } ch <- prometheus.MustNewConstMetric( - pgReplicationSlotActive, - prometheus.GaugeValue, float64(is_active_value), slot_name, + pgReplicationSlotIsActiveDesc, + prometheus.GaugeValue, float64(isActiveValue), slotName, ) } if err := rows.Err(); 
err != nil { From cbc41cff4b3c6dd1f08f371a66abba2bcf78811a Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Mon, 5 Jun 2023 10:24:57 -0700 Subject: [PATCH 28/30] Add some var blocks and look for snake case Signed-off-by: Felix Yuan --- collector/pg_replication.go | 40 ++-- collector/pg_stat_statements.go | 102 ++++----- collector/pg_stat_user_tables.go | 318 +++++++++++++++-------------- collector/pg_statio_user_tables.go | 140 ++++++------- 4 files changed, 304 insertions(+), 296 deletions(-) diff --git a/collector/pg_replication.go b/collector/pg_replication.go index c4ce5aeda..3c857a8a7 100644 --- a/collector/pg_replication.go +++ b/collector/pg_replication.go @@ -31,27 +31,29 @@ func NewPGReplicationCollector(collectorConfig) (Collector, error) { return &PGPostmasterCollector{}, nil } -var pgReplicationLag = prometheus.NewDesc( - "pg_replication_lag", - "Replication lag behind master in seconds", - []string{}, nil, -) +var ( + pgReplicationLag = prometheus.NewDesc( + "pg_replication_lag", + "Replication lag behind master in seconds", + []string{}, nil, + ) -var pgReplicationIsReplica = prometheus.NewDesc( - "pg_replication_is_replica", - "Indicates if the server is a replica", - []string{}, nil, -) + pgReplicationIsReplica = prometheus.NewDesc( + "pg_replication_is_replica", + "Indicates if the server is a replica", + []string{}, nil, + ) -var pgReplicationQuery = `SELECT -CASE - WHEN NOT pg_is_in_recovery() THEN 0 - ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) -END AS lag, -CASE - WHEN pg_is_in_recovery() THEN 1 - ELSE 0 -END as is_replica` + pgReplicationQuery = `SELECT + CASE + WHEN NOT pg_is_in_recovery() THEN 0 + ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) + END AS lag, + CASE + WHEN pg_is_in_recovery() THEN 1 + ELSE 0 + END as is_replica` +) func (c *PGReplicationCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { row := db.QueryRowContext(ctx, diff 
--git a/collector/pg_stat_statements.go b/collector/pg_stat_statements.go index 81869d176..f8fb54049 100644 --- a/collector/pg_stat_statements.go +++ b/collector/pg_stat_statements.go @@ -32,67 +32,69 @@ type PGStatStatementsCollector struct { log log.Logger } -var statStatementsSubsystem = "stat_statements" +const statStatementsSubsystem = "stat_statements" func NewPGStatStatementsCollector(config collectorConfig) (Collector, error) { return &PGStatStatementsCollector{log: config.logger}, nil } -var statSTatementsCallsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "calls_total"), - "Number of times executed", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, -) +var ( + statSTatementsCallsTotal = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "calls_total"), + "Number of times executed", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, + ) -var statStatementsSecondsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "seconds_total"), - "Total time spent in the statement, in seconds", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, -) + statStatementsSecondsTotal = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "seconds_total"), + "Total time spent in the statement, in seconds", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, + ) -var statStatementsRowsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "rows_total"), - "Total number of rows retrieved or affected by the statement", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, -) + statStatementsRowsTotal = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "rows_total"), + "Total number of rows retrieved or affected by the statement", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, + ) -var 
statStatementsBlockReadSecondsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_read_seconds_total"), - "Total time the statement spent reading blocks, in seconds", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, -) + statStatementsBlockReadSecondsTotal = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_read_seconds_total"), + "Total time the statement spent reading blocks, in seconds", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, + ) -var statStatementsBlockWriteSecondsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_write_seconds_total"), - "Total time the statement spent writing blocks, in seconds", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, -) + statStatementsBlockWriteSecondsTotal = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_write_seconds_total"), + "Total time the statement spent writing blocks, in seconds", + []string{"user", "datname", "queryid"}, + prometheus.Labels{}, + ) -var pgStatStatementsQuery = `SELECT -pg_get_userbyid(userid) as user, -pg_database.datname, -pg_stat_statements.queryid, -pg_stat_statements.calls as calls_total, -pg_stat_statements.total_time / 1000.0 as seconds_total, -pg_stat_statements.rows as rows_total, -pg_stat_statements.blk_read_time / 1000.0 as block_read_seconds_total, -pg_stat_statements.blk_write_time / 1000.0 as block_write_seconds_total -FROM pg_stat_statements -JOIN pg_database - ON pg_database.oid = pg_stat_statements.dbid -WHERE - total_time > ( - SELECT percentile_cont(0.1) - WITHIN GROUP (ORDER BY total_time) + pgStatStatementsQuery = `SELECT + pg_get_userbyid(userid) as user, + pg_database.datname, + pg_stat_statements.queryid, + pg_stat_statements.calls as calls_total, + pg_stat_statements.total_time / 1000.0 as seconds_total, + pg_stat_statements.rows as rows_total, + 
pg_stat_statements.blk_read_time / 1000.0 as block_read_seconds_total, + pg_stat_statements.blk_write_time / 1000.0 as block_write_seconds_total FROM pg_stat_statements - ) -ORDER BY seconds_total DESC -LIMIT 100;` + JOIN pg_database + ON pg_database.oid = pg_stat_statements.dbid + WHERE + total_time > ( + SELECT percentile_cont(0.1) + WITHIN GROUP (ORDER BY total_time) + FROM pg_stat_statements + ) + ORDER BY seconds_total DESC + LIMIT 100;` +) func (PGStatStatementsCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { rows, err := db.QueryContext(ctx, diff --git a/collector/pg_stat_user_tables.go b/collector/pg_stat_user_tables.go index c44d7c502..dacac3d7a 100644 --- a/collector/pg_stat_user_tables.go +++ b/collector/pg_stat_user_tables.go @@ -30,171 +30,173 @@ type PGStatUserTablesCollector struct { log log.Logger } -var userTableSubsystem = "stat_user_tables" +const userTableSubsystem = "stat_user_tables" func NewPGStatUserTablesCollector(config collectorConfig) (Collector, error) { return &PGStatUserTablesCollector{log: config.logger}, nil } -var statUserTablesSeqScan = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "seq_scan"), - "Number of sequential scans initiated on this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, +var ( + statUserTablesSeqScan = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "seq_scan"), + "Number of sequential scans initiated on this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesSeqTupRead = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "seq_tup_read"), + "Number of live rows fetched by sequential scans", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesIdxScan = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "idx_scan"), + "Number of index scans initiated 
on this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesIdxTupFetch = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "idx_tup_fetch"), + "Number of live rows fetched by index scans", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesNTupIns = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_ins"), + "Number of rows inserted", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesNTupUpd = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_upd"), + "Number of rows updated", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesNTupDel = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_del"), + "Number of rows deleted", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesNTupHotUpd = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_hot_upd"), + "Number of rows HOT updated (i.e., with no separate index update required)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesNLiveTup = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_live_tup"), + "Estimated number of live rows", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesNDeadTup = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_dead_tup"), + "Estimated number of dead rows", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesNModSinceAnalyze = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_mod_since_analyze"), + "Estimated number of rows changed since last analyze", + []string{"datname", "schemaname", "relname"}, + 
prometheus.Labels{}, + ) + + statUserTablesLastVacuum = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_vacuum"), + "Last time at which this table was manually vacuumed (not counting VACUUM FULL)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesLastAutovacuum = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_autovacuum"), + "Last time at which this table was vacuumed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesLastAnalyze = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_analyze"), + "Last time at which this table was manually analyzed", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesLastAutoanalyze = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_autoanalyze"), + "Last time at which this table was analyzed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesVacuumCount = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "vacuum_count"), + "Number of times this table has been manually vacuumed (not counting VACUUM FULL)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesAutovacuumCount = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "autovacuum_count"), + "Number of times this table has been vacuumed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesAnalyzeCount = prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "analyze_count"), + "Number of times this table has been manually analyzed", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesAutoanalyzeCount = 
prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "autoanalyze_count"), + "Number of times this table has been analyzed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statUserTablesQuery = `SELECT + current_database() datname, + schemaname, + relname, + seq_scan, + seq_tup_read, + idx_scan, + idx_tup_fetch, + n_tup_ins, + n_tup_upd, + n_tup_del, + n_tup_hot_upd, + n_live_tup, + n_dead_tup, + n_mod_since_analyze, + COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum, + COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum, + COALESCE(last_analyze, '1970-01-01Z') as last_analyze, + COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze, + vacuum_count, + autovacuum_count, + analyze_count, + autoanalyze_count + FROM + pg_stat_user_tables` ) -var statUserTablesSeqTupRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "seq_tup_read"), - "Number of live rows fetched by sequential scans", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesIdxScan = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "idx_scan"), - "Number of index scans initiated on this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesIdxTupFetch = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "idx_tup_fetch"), - "Number of live rows fetched by index scans", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesNTupIns = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_ins"), - "Number of rows inserted", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesNTupUpd = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_upd"), - "Number of rows updated", - []string{"datname", 
"schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesNTupDel = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_del"), - "Number of rows deleted", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesNTupHotUpd = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_hot_upd"), - "Number of rows HOT updated (i.e., with no separate index update required)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesNLiveTup = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_live_tup"), - "Estimated number of live rows", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesNDeadTup = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_dead_tup"), - "Estimated number of dead rows", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesNModSinceAnalyze = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_mod_since_analyze"), - "Estimated number of rows changed since last analyze", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesLastVacuum = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_vacuum"), - "Last time at which this table was manually vacuumed (not counting VACUUM FULL)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesLastAutovacuum = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_autovacuum"), - "Last time at which this table was vacuumed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesLastAnalyze = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_analyze"), - "Last time at 
which this table was manually analyzed", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesLastAutoanalyze = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_autoanalyze"), - "Last time at which this table was analyzed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesVacuumCount = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "vacuum_count"), - "Number of times this table has been manually vacuumed (not counting VACUUM FULL)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesAutovacuumCount = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "autovacuum_count"), - "Number of times this table has been vacuumed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesAnalyzeCount = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "analyze_count"), - "Number of times this table has been manually analyzed", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesAutoanalyzeCount = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "autoanalyze_count"), - "Number of times this table has been analyzed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statUserTablesQuery = `SELECT - current_database() datname, - schemaname, - relname, - seq_scan, - seq_tup_read, - idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_live_tup, - n_dead_tup, - n_mod_since_analyze, - COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum, - COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum, - COALESCE(last_analyze, '1970-01-01Z') as last_analyze, - COALESCE(last_autoanalyze, '1970-01-01Z') as 
last_autoanalyze, - vacuum_count, - autovacuum_count, - analyze_count, - autoanalyze_count -FROM - pg_stat_user_tables` - func (c *PGStatUserTablesCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { rows, err := db.QueryContext(ctx, statUserTablesQuery) diff --git a/collector/pg_statio_user_tables.go b/collector/pg_statio_user_tables.go index 71bdd9c94..f6fd40534 100644 --- a/collector/pg_statio_user_tables.go +++ b/collector/pg_statio_user_tables.go @@ -29,82 +29,84 @@ type PGStatIOUserTablesCollector struct { log log.Logger } -var statioUserTableSubsystem = "statio_user_tables" +const statioUserTableSubsystem = "statio_user_tables" func NewPGStatIOUserTablesCollector(config collectorConfig) (Collector, error) { return &PGStatIOUserTablesCollector{log: config.logger}, nil } -var statioUserTablesHeapBlksRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_read"), - "Number of disk blocks read from this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, +var ( + statioUserTablesHeapBlksRead = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_read"), + "Number of disk blocks read from this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statioUserTablesHeapBlksHit = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_hit"), + "Number of buffer hits in this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statioUserTablesIdxBlksRead = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_read"), + "Number of disk blocks read from all indexes on this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statioUserTablesIdxBlksHit = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, 
"idx_blocks_hit"), + "Number of buffer hits in all indexes on this table", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statioUserTablesToastBlksRead = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_read"), + "Number of disk blocks read from this table's TOAST table (if any)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statioUserTablesToastBlksHit = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_hit"), + "Number of buffer hits in this table's TOAST table (if any)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statioUserTablesTidxBlksRead = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_read"), + "Number of disk blocks read from this table's TOAST table indexes (if any)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statioUserTablesTidxBlksHit = prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_hit"), + "Number of buffer hits in this table's TOAST table indexes (if any)", + []string{"datname", "schemaname", "relname"}, + prometheus.Labels{}, + ) + + statioUserTablesQuery = `SELECT + current_database() datname, + schemaname, + relname, + heap_blks_read, + heap_blks_hit, + idx_blks_read, + idx_blks_hit, + toast_blks_read, + toast_blks_hit, + tidx_blks_read, + tidx_blks_hit + FROM pg_statio_user_tables` ) -var statioUserTablesHeapBlksHit = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_hit"), - "Number of buffer hits in this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statioUserTablesIdxBlksRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_read"), - "Number of disk blocks read from all indexes on this 
table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statioUserTablesIdxBlksHit = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_hit"), - "Number of buffer hits in all indexes on this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statioUserTablesToastBlksRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_read"), - "Number of disk blocks read from this table's TOAST table (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statioUserTablesToastBlksHit = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_hit"), - "Number of buffer hits in this table's TOAST table (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statioUserTablesTidxBlksRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_read"), - "Number of disk blocks read from this table's TOAST table indexes (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statioUserTablesTidxBlksHit = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_hit"), - "Number of buffer hits in this table's TOAST table indexes (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, -) - -var statioUserTablesQuery = `SELECT - current_database() datname, - schemaname, - relname, - heap_blks_read, - heap_blks_hit, - idx_blks_read, - idx_blks_hit, - toast_blks_read, - toast_blks_hit, - tidx_blks_read, - tidx_blks_hit -FROM pg_statio_user_tables` - func (PGStatIOUserTablesCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { rows, err := db.QueryContext(ctx, statioUserTablesQuery) From d703cf73975ae93ae97f5f6019da895209c74a2f Mon Sep 17 00:00:00 2001 
From: Felix Yuan Date: Mon, 5 Jun 2023 10:29:55 -0700 Subject: [PATCH 29/30] Cleanup Signed-off-by: Felix Yuan --- collector/pg_postmaster.go | 14 ++++++++------ collector/pg_process_idle.go | 2 +- collector/pg_replication.go | 1 - collector/pg_stat_bgwriter.go | 10 ---------- collector/pg_stat_statements.go | 4 ---- collector/pg_stat_user_tables.go | 18 ------------------ collector/pg_statio_user_tables.go | 7 ------- collector/replication_slots.go | 2 -- 8 files changed, 9 insertions(+), 49 deletions(-) diff --git a/collector/pg_postmaster.go b/collector/pg_postmaster.go index 28dbba7d4..b7d844e18 100644 --- a/collector/pg_postmaster.go +++ b/collector/pg_postmaster.go @@ -31,13 +31,15 @@ func NewPGPostmasterCollector(collectorConfig) (Collector, error) { return &PGPostmasterCollector{}, nil } -var pgPostMasterStartTimeSeconds = prometheus.NewDesc( - "pg_postmaster_start_time_seconds", - "Time at which postmaster started", - []string{}, nil, -) +var ( + pgPostMasterStartTimeSeconds = prometheus.NewDesc( + "pg_postmaster_start_time_seconds", + "Time at which postmaster started", + []string{}, nil, + ) -var pgPostmasterQuery = "SELECT pg_postmaster_start_time from pg_postmaster_start_time();" + pgPostmasterQuery = "SELECT pg_postmaster_start_time from pg_postmaster_start_time();" +) func (c *PGPostmasterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { row := db.QueryRowContext(ctx, diff --git a/collector/pg_process_idle.go b/collector/pg_process_idle.go index d9c88471b..b8c1a6cb7 100644 --- a/collector/pg_process_idle.go +++ b/collector/pg_process_idle.go @@ -29,7 +29,7 @@ type PGProcessIdleCollector struct { log log.Logger } -var processIdleSubsystem = "process_idle" +const processIdleSubsystem = "process_idle" func NewPGProcessIdleCollector(config collectorConfig) (Collector, error) { return &PGProcessIdleCollector{log: config.logger}, nil diff --git a/collector/pg_replication.go b/collector/pg_replication.go index 
3c857a8a7..10e4de521 100644 --- a/collector/pg_replication.go +++ b/collector/pg_replication.go @@ -37,7 +37,6 @@ var ( "Replication lag behind master in seconds", []string{}, nil, ) - pgReplicationIsReplica = prometheus.NewDesc( "pg_replication_is_replica", "Indicates if the server is a replica", diff --git a/collector/pg_stat_bgwriter.go b/collector/pg_stat_bgwriter.go index 98a5b5efd..6efe87d52 100644 --- a/collector/pg_stat_bgwriter.go +++ b/collector/pg_stat_bgwriter.go @@ -41,70 +41,60 @@ var ( []string{}, prometheus.Labels{}, ) - statBGWriterCheckpointsReqDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_req_total"), "Number of requested checkpoints that have been performed", []string{}, prometheus.Labels{}, ) - statBGWriterCheckpointsReqTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_write_time_total"), "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds", []string{}, prometheus.Labels{}, ) - statBGWriterCheckpointsSyncTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_sync_time_total"), "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds", []string{}, prometheus.Labels{}, ) - statBGWriterBuffersCheckpointDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_checkpoint_total"), "Number of buffers written during checkpoints", []string{}, prometheus.Labels{}, ) - statBGWriterBuffersCleanDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_clean_total"), "Number of buffers written by the background writer", []string{}, prometheus.Labels{}, ) - statBGWriterMaxwrittenCleanDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, bgWriterSubsystem, "maxwritten_clean_total"), "Number of times the 
background writer stopped a cleaning scan because it had written too many buffers", []string{}, prometheus.Labels{}, ) - statBGWriterBuffersBackendDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_total"), "Number of buffers written directly by a backend", []string{}, prometheus.Labels{}, ) - statBGWriterBuffersBackendFsyncDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_fsync_total"), "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)", []string{}, prometheus.Labels{}, ) - statBGWriterBuffersAllocDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_alloc_total"), "Number of buffers allocated", []string{}, prometheus.Labels{}, ) - statBGWriterStatsResetDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, bgWriterSubsystem, "stats_reset_total"), "Time at which these statistics were last reset", diff --git a/collector/pg_stat_statements.go b/collector/pg_stat_statements.go index f8fb54049..78485f619 100644 --- a/collector/pg_stat_statements.go +++ b/collector/pg_stat_statements.go @@ -45,28 +45,24 @@ var ( []string{"user", "datname", "queryid"}, prometheus.Labels{}, ) - statStatementsSecondsTotal = prometheus.NewDesc( prometheus.BuildFQName(namespace, statStatementsSubsystem, "seconds_total"), "Total time spent in the statement, in seconds", []string{"user", "datname", "queryid"}, prometheus.Labels{}, ) - statStatementsRowsTotal = prometheus.NewDesc( prometheus.BuildFQName(namespace, statStatementsSubsystem, "rows_total"), "Total number of rows retrieved or affected by the statement", []string{"user", "datname", "queryid"}, prometheus.Labels{}, ) - statStatementsBlockReadSecondsTotal = prometheus.NewDesc( prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_read_seconds_total"), "Total time the statement spent reading 
blocks, in seconds", []string{"user", "datname", "queryid"}, prometheus.Labels{}, ) - statStatementsBlockWriteSecondsTotal = prometheus.NewDesc( prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_write_seconds_total"), "Total time the statement spent writing blocks, in seconds", diff --git a/collector/pg_stat_user_tables.go b/collector/pg_stat_user_tables.go index dacac3d7a..1f267a7ee 100644 --- a/collector/pg_stat_user_tables.go +++ b/collector/pg_stat_user_tables.go @@ -43,126 +43,108 @@ var ( []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesSeqTupRead = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "seq_tup_read"), "Number of live rows fetched by sequential scans", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesIdxScan = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "idx_scan"), "Number of index scans initiated on this table", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesIdxTupFetch = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "idx_tup_fetch"), "Number of live rows fetched by index scans", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesNTupIns = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_ins"), "Number of rows inserted", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesNTupUpd = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_upd"), "Number of rows updated", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesNTupDel = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_del"), "Number of rows deleted", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesNTupHotUpd = prometheus.NewDesc( 
prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_hot_upd"), "Number of rows HOT updated (i.e., with no separate index update required)", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesNLiveTup = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "n_live_tup"), "Estimated number of live rows", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesNDeadTup = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "n_dead_tup"), "Estimated number of dead rows", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesNModSinceAnalyze = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "n_mod_since_analyze"), "Estimated number of rows changed since last analyze", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesLastVacuum = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "last_vacuum"), "Last time at which this table was manually vacuumed (not counting VACUUM FULL)", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesLastAutovacuum = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "last_autovacuum"), "Last time at which this table was vacuumed by the autovacuum daemon", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesLastAnalyze = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "last_analyze"), "Last time at which this table was manually analyzed", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesLastAutoanalyze = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "last_autoanalyze"), "Last time at which this table was analyzed by the autovacuum daemon", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesVacuumCount = 
prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "vacuum_count"), "Number of times this table has been manually vacuumed (not counting VACUUM FULL)", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesAutovacuumCount = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "autovacuum_count"), "Number of times this table has been vacuumed by the autovacuum daemon", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesAnalyzeCount = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "analyze_count"), "Number of times this table has been manually analyzed", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statUserTablesAutoanalyzeCount = prometheus.NewDesc( prometheus.BuildFQName(namespace, userTableSubsystem, "autoanalyze_count"), "Number of times this table has been analyzed by the autovacuum daemon", diff --git a/collector/pg_statio_user_tables.go b/collector/pg_statio_user_tables.go index f6fd40534..e84631df5 100644 --- a/collector/pg_statio_user_tables.go +++ b/collector/pg_statio_user_tables.go @@ -42,49 +42,42 @@ var ( []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statioUserTablesHeapBlksHit = prometheus.NewDesc( prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_hit"), "Number of buffer hits in this table", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statioUserTablesIdxBlksRead = prometheus.NewDesc( prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_read"), "Number of disk blocks read from all indexes on this table", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statioUserTablesIdxBlksHit = prometheus.NewDesc( prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_hit"), "Number of buffer hits in all indexes on this table", []string{"datname", "schemaname", "relname"}, 
prometheus.Labels{}, ) - statioUserTablesToastBlksRead = prometheus.NewDesc( prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_read"), "Number of disk blocks read from this table's TOAST table (if any)", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statioUserTablesToastBlksHit = prometheus.NewDesc( prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_hit"), "Number of buffer hits in this table's TOAST table (if any)", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statioUserTablesTidxBlksRead = prometheus.NewDesc( prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_read"), "Number of disk blocks read from this table's TOAST table indexes (if any)", []string{"datname", "schemaname", "relname"}, prometheus.Labels{}, ) - statioUserTablesTidxBlksHit = prometheus.NewDesc( prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_hit"), "Number of buffer hits in this table's TOAST table indexes (if any)", diff --git a/collector/replication_slots.go b/collector/replication_slots.go index 95690f0bc..8900db3ae 100644 --- a/collector/replication_slots.go +++ b/collector/replication_slots.go @@ -39,13 +39,11 @@ var ( "current wal lsn value", []string{"slot_name"}, nil, ) - pgReplicationSlotCurrentFlushDesc = prometheus.NewDesc( "pg_replication_slot_confirmed_flush_lsn", "last lsn confirmed flushed to the replication slot", []string{"slot_name"}, nil, ) - pgReplicationSlotIsActiveDesc = prometheus.NewDesc( "pg_replication_slot_is_active", "whether the replication slot is active or not", From ac08ee6939ca223df6a54a3927f21c73e8de3e4d Mon Sep 17 00:00:00 2001 From: Felix Yuan Date: Mon, 5 Jun 2023 12:18:48 -0700 Subject: [PATCH 30/30] Go mod tidy and some other fixes Signed-off-by: Felix Yuan --- collector/replication_slots.go | 8 ++++---- go.mod | 6 +++--- go.sum | 5 +++-- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git 
a/collector/replication_slots.go b/collector/replication_slots.go index 20e6ca7a1..0aaf39cdc 100644 --- a/collector/replication_slots.go +++ b/collector/replication_slots.go @@ -70,9 +70,9 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch cha for rows.Next() { var slotName string var walLSN int64 - var flusLSN int64 + var flushLSN int64 var isActive bool - if err := rows.Scan(&slotName, &walLSN, &flusLSN, &isActive); err != nil { + if err := rows.Scan(&slotName, &walLSN, &flushLSN, &isActive); err != nil { return err } @@ -88,12 +88,12 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch cha if isActive { ch <- prometheus.MustNewConstMetric( pgReplicationSlotCurrentFlushDesc, - prometheus.GaugeValue, float64(flusLSN), slotName, + prometheus.GaugeValue, float64(flushLSN), slotName, ) } ch <- prometheus.MustNewConstMetric( pgReplicationSlotIsActiveDesc, - prometheus.GaugeValue, float64(flusLSN), slotName, + prometheus.GaugeValue, float64(isActiveValue), slotName, ) } if err := rows.Err(); err != nil { diff --git a/go.mod b/go.mod index a9884856e..6444a66e4 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/prometheus/client_model v0.4.0 github.com/prometheus/common v0.44.0 github.com/prometheus/exporter-toolkit v0.10.0 - github.com/smartystreets/goconvey v1.8.0 + github.com/smartystreets/goconvey v1.8.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -25,7 +25,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/gopherjs/gopherjs v1.17.2 // indirect + github.com/gopherjs/gopherjs v1.17.2 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/kr/pretty v0.3.1 // indirect @@ -34,7 +34,7 @@ require ( github.com/mwitkow/go-conntrack 
v0.0.0-20190716064945-2f068394615f // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect - github.com/smartystreets/assertions v1.13.1 // indirect + github.com/smartystreets/assertions v1.13.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect golang.org/x/crypto v0.8.0 // indirect golang.org/x/net v0.10.0 // indirect diff --git a/go.sum b/go.sum index e4c583d4b..12673c116 100644 --- a/go.sum +++ b/go.sum @@ -59,12 +59,13 @@ github.com/prometheus/exporter-toolkit v0.10.0 h1:yOAzZTi4M22ZzVxD+fhy1URTuNRj/3 github.com/prometheus/exporter-toolkit v0.10.0/go.mod h1:+sVFzuvV5JDyw+Ih6p3zFxZNVnKQa3x5qPmDSiPu4ZY= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU= github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY= github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w= github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=