
Commit 24b8e39

Change desc pattern to use variables instead of a map

Signed-off-by: Felix Yuan <[email protected]>
1 parent 9a32495 · commit 24b8e39

9 files changed: +395 -371 lines
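
The change is mechanical across the collectors: each per-collector map[string]*prometheus.Desc keyed by metric-name strings is replaced with one package-level variable per descriptor, so a mistyped descriptor reference becomes a compile error instead of a nil map lookup that only fails at scrape time. A condensed sketch of the two patterns, using the pg_database descriptor from the first diff below (both declarations are shown together purely for illustration):

package collector

import "github.com/prometheus/client_golang/prometheus"

// Old pattern: descriptors held in a map and looked up by string key at
// collection time; a misspelled key compiles fine and yields a nil Desc.
var pgDatabase = map[string]*prometheus.Desc{
	"size_bytes": prometheus.NewDesc(
		"pg_database_size_bytes",
		"Disk space used by the database",
		[]string{"datname"}, nil,
	),
}

// New pattern: one package-level variable per descriptor; a misspelled
// variable name fails to compile.
var pgDatabaseSizeBytes = prometheus.NewDesc(
	"pg_database_size_bytes",
	"Disk space used by the database",
	[]string{"datname"}, nil,
)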

collector/pg_database.go

+6 -8

@@ -41,13 +41,11 @@ func NewPGDatabaseCollector(config collectorConfig) (Collector, error) {
 	}, nil
 }
 
-var pgDatabase = map[string]*prometheus.Desc{
-	"size_bytes": prometheus.NewDesc(
-		"pg_database_size_bytes",
-		"Disk space used by the database",
-		[]string{"datname"}, nil,
-	),
-}
+var pgDatabaseSizeBytes = prometheus.NewDesc(
+	"pg_database_size_bytes",
+	"Disk space used by the database",
+	[]string{"datname"}, nil,
+)
 
 // Update implements Collector and exposes database size.
 // It is called by the Prometheus registry when collecting metrics.
@@ -96,7 +94,7 @@ func (c PGDatabaseCollector) Update(ctx context.Context, db *sql.DB, ch chan<- p
 		}
 
 		ch <- prometheus.MustNewConstMetric(
-			pgDatabase["size_bytes"],
+			pgDatabaseSizeBytes,
			prometheus.GaugeValue, float64(size), datname,
 		)
 	}
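
A note on the call in Update (a general client_golang rule, not something introduced by this commit): the trailing label values passed to MustNewConstMetric must match the Desc's variable labels one for one, which is why the single datname value stays. A small self-contained sketch of that shape, with a made-up size and database name:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// Stand-in with the same shape as pgDatabaseSizeBytes in the diff above.
var dbSizeBytes = prometheus.NewDesc(
	"pg_database_size_bytes",
	"Disk space used by the database",
	[]string{"datname"}, nil,
)

func main() {
	// MustNewConstMetric(desc, valueType, value, labelValues...): one trailing
	// label value per variable label, or construction panics at collection time.
	m := prometheus.MustNewConstMetric(
		dbSizeBytes,
		prometheus.GaugeValue, 8.5e6, // made-up size in bytes
		"postgres", // value for the "datname" variable label
	)
	fmt.Println(m.Desc())
}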

collector/pg_postmaster.go

+7 -9

@@ -31,13 +31,11 @@ func NewPGPostmasterCollector(collectorConfig) (Collector, error) {
 	return &PGPostmasterCollector{}, nil
 }
 
-var pgPostmaster = map[string]*prometheus.Desc{
-	"start_time_seconds": prometheus.NewDesc(
-		"pg_postmaster_start_time_seconds",
-		"Time at which postmaster started",
-		[]string{"process_name"}, nil,
-	),
-}
+var pgPostMasterStartTimeSeconds = prometheus.NewDesc(
+	"pg_postmaster_start_time_seconds",
+	"Time at which postmaster started",
+	[]string{}, nil,
+)
 
 func (c *PGPostmasterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
 	row := db.QueryRowContext(ctx,
@@ -51,8 +49,8 @@ func (c *PGPostmasterCollector) Update(ctx context.Context, db *sql.DB, ch chan<
 		return err
 	}
 	ch <- prometheus.MustNewConstMetric(
-		pgPostmaster["start_time_seconds"],
-		prometheus.GaugeValue, startTimeSeconds, "postmaster",
+		pgPostMasterStartTimeSeconds,
+		prometheus.GaugeValue, startTimeSeconds,
 	)
 	return nil
 }
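
Here the refactor also drops the process_name variable label, so pg_postmaster_start_time_seconds is now exposed without labels, and the "postmaster" label value has to disappear from the MustNewConstMetric call as well. A minimal sketch of why the two go together (startTime is a name invented for this example):

package main

import "github.com/prometheus/client_golang/prometheus"

// Same shape as the new pgPostMasterStartTimeSeconds: no variable labels.
var startTime = prometheus.NewDesc(
	"pg_postmaster_start_time_seconds",
	"Time at which postmaster started",
	[]string{}, nil,
)

func main() {
	// OK: zero label values for zero variable labels.
	_ = prometheus.MustNewConstMetric(startTime, prometheus.GaugeValue, 1.7e9)

	// Would panic at collection time: one label value too many, which is why
	// dropping the label and dropping the "postmaster" argument go together.
	// _ = prometheus.MustNewConstMetric(startTime, prometheus.GaugeValue, 1.7e9, "postmaster")
}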

collector/pg_process_idle.go

+7 -9

@@ -35,14 +35,12 @@ func NewPGProcessIdleCollector(config collectorConfig) (Collector, error) {
 	return &PGProcessIdleCollector{log: config.logger}, nil
 }
 
-var processIdle = map[string]*prometheus.Desc{
-	"seconds": prometheus.NewDesc(
-		prometheus.BuildFQName(namespace, processIdleSubsystem, "seconds"),
-		"Idle time of server processes",
-		[]string{"application_name"},
-		prometheus.Labels{},
-	),
-}
+var pgProcessIdleSeconds = prometheus.NewDesc(
+	prometheus.BuildFQName(namespace, processIdleSubsystem, "seconds"),
+	"Idle time of server processes",
+	[]string{"application_name"},
+	prometheus.Labels{},
+)
 
 func (PGProcessIdleCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
 	row := db.QueryRowContext(ctx,
@@ -100,7 +98,7 @@ func (PGProcessIdleCollector) Update(ctx context.Context, db *sql.DB, ch chan<-
 		return err
 	}
 	ch <- prometheus.MustNewConstHistogram(
-		processIdle["seconds"],
+		pgProcessIdleSeconds,
 		secondsCount, float64(secondsSum), buckets,
 		applicationName,
 	)
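
pg_process_idle keeps its application_name label; only the descriptor moves from processIdle["seconds"] to the pgProcessIdleSeconds variable. For reference, MustNewConstHistogram takes the descriptor, the observation count, the sum, a map of bucket upper bound to cumulative count, and then the label values. A self-contained sketch with made-up numbers (idleSeconds stands in for the real descriptor, whose name is built with prometheus.BuildFQName in the diff):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// Stand-in for pgProcessIdleSeconds.
var idleSeconds = prometheus.NewDesc(
	"pg_process_idle_seconds",
	"Idle time of server processes",
	[]string{"application_name"},
	prometheus.Labels{},
)

func main() {
	// Buckets map an upper bound to the cumulative count of observations <= it.
	buckets := map[float64]uint64{1: 3, 10: 5, 60: 6}
	m := prometheus.MustNewConstHistogram(
		idleSeconds,
		6,    // total observation count
		42.0, // sum of all observed values
		buckets,
		"psql", // value for the "application_name" label
	)
	fmt.Println(m.Desc())
}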

collector/pg_replication.go

+15 -16

@@ -31,18 +31,17 @@ func NewPGReplicationCollector(collectorConfig) (Collector, error) {
 	return &PGPostmasterCollector{}, nil
 }
 
-var pgReplication = map[string]*prometheus.Desc{
-	"replication_lag": prometheus.NewDesc(
-		"pg_replication_lag",
-		"Replication lag behind master in seconds",
-		[]string{"process_name"}, nil,
-	),
-	"is_replica": prometheus.NewDesc(
-		"pg_replication_is_replica",
-		"Indicates if the server is a replica",
-		[]string{"process_name"}, nil,
-	),
-}
+var pgReplicationLag = prometheus.NewDesc(
+	"pg_replication_lag",
+	"Replication lag behind master in seconds",
+	[]string{}, nil,
+)
+
+var pgReplicationIsReplica = prometheus.NewDesc(
+	"pg_replication_is_replica",
+	"Indicates if the server is a replica",
+	[]string{}, nil,
+)
 
 func (c *PGReplicationCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
 	row := db.QueryRowContext(ctx,
@@ -63,12 +62,12 @@ func (c *PGReplicationCollector) Update(ctx context.Context, db *sql.DB, ch chan
 		return err
 	}
 	ch <- prometheus.MustNewConstMetric(
-		pgReplication["replication_lag"],
-		prometheus.GaugeValue, lag, "replication",
+		pgReplicationLag,
+		prometheus.GaugeValue, lag,
 	)
 	ch <- prometheus.MustNewConstMetric(
-		pgReplication["is_replica"],
-		prometheus.GaugeValue, float64(isReplica), "replication",
+		pgReplicationIsReplica,
+		prometheus.GaugeValue, float64(isReplica),
 	)
 	return nil
 }
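
As with pg_postmaster, both replication descriptors lose their process_name variable label (previously always set to "replication"), so anything selecting on that label would need adjusting. One quick way to eyeball what a renamed descriptor will expose is to print it, since *prometheus.Desc implements fmt.Stringer; the variable names below mirror the diff but live in a throwaway main package:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// Same shapes as the new descriptors in the diff: no variable labels.
var (
	pgReplicationLag = prometheus.NewDesc(
		"pg_replication_lag",
		"Replication lag behind master in seconds",
		[]string{}, nil,
	)
	pgReplicationIsReplica = prometheus.NewDesc(
		"pg_replication_is_replica",
		"Indicates if the server is a replica",
		[]string{}, nil,
	)
)

func main() {
	// Prints the fqName, help text, const labels and (now empty) variable labels.
	fmt.Println(pgReplicationLag)
	fmt.Println(pgReplicationIsReplica)
}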

collector/pg_stat_bgwriter.go

+87 -79

@@ -34,74 +34,82 @@ func NewPGStatBGWriterCollector(collectorConfig) (Collector, error) {
 
 const bgWriterSubsystem = "stat_bgwriter"
 
-var statBGWriter = map[string]*prometheus.Desc{
-	"checkpoints_timed": prometheus.NewDesc(
-		prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_timed_total"),
-		"Number of scheduled checkpoints that have been performed",
-		[]string{},
-		prometheus.Labels{},
-	),
-	"checkpoints_req": prometheus.NewDesc(
-		prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_req_total"),
-		"Number of requested checkpoints that have been performed",
-		[]string{},
-		prometheus.Labels{},
-	),
-	"checkpoint_write_time": prometheus.NewDesc(
-		prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_write_time_total"),
-		"Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds",
-		[]string{},
-		prometheus.Labels{},
-	),
-	"checkpoint_sync_time": prometheus.NewDesc(
-		prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_sync_time_total"),
-		"Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds",
-		[]string{},
-		prometheus.Labels{},
-	),
-	"buffers_checkpoint": prometheus.NewDesc(
-		prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_checkpoint_total"),
-		"Number of buffers written during checkpoints",
-		[]string{},
-		prometheus.Labels{},
-	),
-	"buffers_clean": prometheus.NewDesc(
-		prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_clean_total"),
-		"Number of buffers written by the background writer",
-		[]string{},
-		prometheus.Labels{},
-	),
-	"maxwritten_clean": prometheus.NewDesc(
-		prometheus.BuildFQName(namespace, bgWriterSubsystem, "maxwritten_clean_total"),
-		"Number of times the background writer stopped a cleaning scan because it had written too many buffers",
-		[]string{},
-		prometheus.Labels{},
-	),
-	"buffers_backend": prometheus.NewDesc(
-		prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_total"),
-		"Number of buffers written directly by a backend",
-		[]string{},
-		prometheus.Labels{},
-	),
-	"buffers_backend_fsync": prometheus.NewDesc(
-		prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_fsync_total"),
-		"Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)",
-		[]string{},
-		prometheus.Labels{},
-	),
-	"buffers_alloc": prometheus.NewDesc(
-		prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_alloc_total"),
-		"Number of buffers allocated",
-		[]string{},
-		prometheus.Labels{},
-	),
-	"stats_reset": prometheus.NewDesc(
-		prometheus.BuildFQName(namespace, bgWriterSubsystem, "stats_reset_total"),
-		"Time at which these statistics were last reset",
-		[]string{},
-		prometheus.Labels{},
-	),
-}
+var statBGWriterCheckpointsTimed = prometheus.NewDesc(
+	prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_timed_total"),
+	"Number of scheduled checkpoints that have been performed",
+	[]string{},
+	prometheus.Labels{},
+)
+
+var statBGWriterCheckpointsReq = prometheus.NewDesc(
+	prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_req_total"),
+	"Number of requested checkpoints that have been performed",
+	[]string{},
+	prometheus.Labels{},
+)
+
+var statBGWriterCheckpointWriteTime = prometheus.NewDesc(
+	prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_write_time_total"),
+	"Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds",
+	[]string{},
+	prometheus.Labels{},
+)
+
+var statBGWriterCheckpointSyncTime = prometheus.NewDesc(
+	prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_sync_time_total"),
+	"Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds",
+	[]string{},
+	prometheus.Labels{},
+)
+
+var statBGWriterBuffersCheckpoint = prometheus.NewDesc(
+	prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_checkpoint_total"),
+	"Number of buffers written during checkpoints",
+	[]string{},
+	prometheus.Labels{},
+)
+
+var statBGWriterBuffersClean = prometheus.NewDesc(
+	prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_clean_total"),
+	"Number of buffers written by the background writer",
+	[]string{},
+	prometheus.Labels{},
+)
+
+var statBGWriterMaxWrittenClean = prometheus.NewDesc(
+	prometheus.BuildFQName(namespace, bgWriterSubsystem, "maxwritten_clean_total"),
+	"Number of times the background writer stopped a cleaning scan because it had written too many buffers",
+	[]string{},
+	prometheus.Labels{},
+)
+
+var statBGWriterBuffersBackend = prometheus.NewDesc(
+	prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_total"),
+	"Number of buffers written directly by a backend",
+	[]string{},
+	prometheus.Labels{},
+)
+
+var statBGWriterBuffersBackendFsync = prometheus.NewDesc(
+	prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_fsync_total"),
+	"Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)",
+	[]string{},
+	prometheus.Labels{},
+)
+
+var statBGWriterBuffersAlloc = prometheus.NewDesc(
+	prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_alloc_total"),
+	"Number of buffers allocated",
+	[]string{},
+	prometheus.Labels{},
+)
+
+var statBGWriterStatsReset = prometheus.NewDesc(
+	prometheus.BuildFQName(namespace, bgWriterSubsystem, "stats_reset_total"),
+	"Time at which these statistics were last reset",
+	[]string{},
+	prometheus.Labels{},
+)
 
 func (PGStatBGWriterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
 	row := db.QueryRowContext(ctx,
@@ -137,57 +145,57 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, db *sql.DB, ch chan<-
 	}
 
 	ch <- prometheus.MustNewConstMetric(
-		statBGWriter["checkpoints_timed"],
+		statBGWriterCheckpointsTimed,
 		prometheus.CounterValue,
 		float64(cpt),
 	)
 	ch <- prometheus.MustNewConstMetric(
-		statBGWriter["checkpoints_req"],
+		statBGWriterCheckpointsReq,
 		prometheus.CounterValue,
 		float64(cpr),
 	)
 	ch <- prometheus.MustNewConstMetric(
-		statBGWriter["checkpoint_write_time"],
+		statBGWriterCheckpointWriteTime,
 		prometheus.CounterValue,
 		float64(cpwt),
 	)
 	ch <- prometheus.MustNewConstMetric(
-		statBGWriter["checkpoint_sync_time"],
+		statBGWriterCheckpointSyncTime,
 		prometheus.CounterValue,
 		float64(cpst),
 	)
 	ch <- prometheus.MustNewConstMetric(
-		statBGWriter["buffers_checkpoint"],
+		statBGWriterBuffersCheckpoint,
 		prometheus.CounterValue,
 		float64(bcp),
 	)
 	ch <- prometheus.MustNewConstMetric(
-		statBGWriter["buffers_clean"],
+		statBGWriterBuffersClean,
 		prometheus.CounterValue,
 		float64(bc),
 	)
 	ch <- prometheus.MustNewConstMetric(
-		statBGWriter["maxwritten_clean"],
+		statBGWriterMaxWrittenClean,
 		prometheus.CounterValue,
 		float64(mwc),
 	)
 	ch <- prometheus.MustNewConstMetric(
-		statBGWriter["buffers_backend"],
+		statBGWriterBuffersBackend,
 		prometheus.CounterValue,
 		float64(bb),
 	)
 	ch <- prometheus.MustNewConstMetric(
-		statBGWriter["buffers_backend_fsync"],
+		statBGWriterBuffersBackendFsync,
 		prometheus.CounterValue,
 		float64(bbf),
 	)
 	ch <- prometheus.MustNewConstMetric(
-		statBGWriter["buffers_alloc"],
+		statBGWriterBuffersAlloc,
 		prometheus.CounterValue,
 		float64(ba),
 	)
 	ch <- prometheus.MustNewConstMetric(
-		statBGWriter["stats_reset"],
+		statBGWriterStatsReset,
 		prometheus.CounterValue,
 		float64(sr.Unix()),
 	)
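
The statBGWriter* variables follow the usual client_golang pattern of package-level descriptors shared between the places that construct metrics from them. As a generic, hedged illustration of that pattern outside the exporter (every demo* name below is invented for this sketch, and postgres_exporter's own collectors implement an internal Collector interface rather than prometheus.Collector):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Package-level descriptor, same shape as the statBGWriter* variables.
var demoCheckpointsTimed = prometheus.NewDesc(
	"demo_stat_bgwriter_checkpoints_timed_total",
	"Number of scheduled checkpoints that have been performed",
	[]string{},
	prometheus.Labels{},
)

// demoCollector is a toy prometheus.Collector that reuses the descriptor
// in both Describe and Collect.
type demoCollector struct{}

func (demoCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- demoCheckpointsTimed
}

func (demoCollector) Collect(ch chan<- prometheus.Metric) {
	// In the real collector the value comes from pg_stat_bgwriter.
	ch <- prometheus.MustNewConstMetric(demoCheckpointsTimed, prometheus.CounterValue, 42)
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(demoCollector{})
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":9187", nil)
}

Scraping /metrics on that toy server should show a single counter line, demo_stat_bgwriter_checkpoints_timed_total 42, alongside its HELP and TYPE comments.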
