
Commit ba46974

Merge pull request #32 from percona/PMM-2612

PMM-2612: Scrape MySQL database only once.

2 parents 303c47f + a3e1bfb, commit ba46974

File tree

3 files changed: +39 -54 lines changed

collector/exporter.go (+30 -47)

@@ -36,75 +36,50 @@ var (
 type Exporter struct {
 	db       *sql.DB
 	scrapers []Scraper
-	stats    *Stats
-	mysqldUp prometheus.Gauge
+	metrics  Metrics
 }
 
 // New returns a new MySQL exporter for the provided DSN.
-func New(db *sql.DB, scrapers []Scraper, stats *Stats) *Exporter {
+func New(db *sql.DB, metrics Metrics, scrapers []Scraper) *Exporter {
 	return &Exporter{
 		db:       db,
 		scrapers: scrapers,
-		stats:    stats,
-		mysqldUp: prometheus.NewGauge(prometheus.GaugeOpts{
-			Namespace: namespace,
-			Name:      "up",
-			Help:      "Whether the MySQL server is up.",
-		}),
+		metrics:  metrics,
 	}
 }
 
 // Describe implements prometheus.Collector.
 func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
-	// We cannot know in advance what metrics the exporter will generate
-	// from MySQL. So we use the poor man's describe method: Run a collect
-	// and send the descriptors of all the collected metrics. The problem
-	// here is that we need to connect to the MySQL DB. If it is currently
-	// unavailable, the descriptors will be incomplete. Since this is a
-	// stand-alone exporter and not used as a library within other code
-	// implementing additional metrics, the worst that can happen is that we
-	// don't detect inconsistent metrics created by this exporter
-	// itself. Also, a change in the monitored MySQL instance may change the
-	// exported metrics during the runtime of the exporter.
-
-	metricCh := make(chan prometheus.Metric)
-	doneCh := make(chan struct{})
-
-	go func() {
-		for m := range metricCh {
-			ch <- m.Desc()
-		}
-		close(doneCh)
-	}()
-
-	e.Collect(metricCh)
-	close(metricCh)
-	<-doneCh
+	ch <- e.metrics.TotalScrapes.Desc()
+	ch <- e.metrics.Error.Desc()
+	e.metrics.ScrapeErrors.Describe(ch)
+	ch <- e.metrics.MySQLUp.Desc()
 }
 
 // Collect implements prometheus.Collector.
 func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
 	e.scrape(ch)
 
-	ch <- e.stats.TotalScrapes
-	ch <- e.stats.Error
-	e.stats.ScrapeErrors.Collect(ch)
-	ch <- e.mysqldUp
+	ch <- e.metrics.TotalScrapes
+	ch <- e.metrics.Error
+	e.metrics.ScrapeErrors.Collect(ch)
+	ch <- e.metrics.MySQLUp
 }
 
 func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
-	e.stats.Error.Set(0)
-	e.stats.TotalScrapes.Inc()
+	e.metrics.Error.Set(0)
+	e.metrics.TotalScrapes.Inc()
 	var err error
 
 	scrapeTime := time.Now()
 	if err = e.db.Ping(); err != nil {
 		log.Errorln("Error pinging mysqld:", err)
-		e.mysqldUp.Set(0)
-		e.stats.Error.Set(1)
+		e.metrics.MySQLUp.Set(0)
+		e.metrics.Error.Set(1)
 		return
 	}
-	e.mysqldUp.Set(1)
+	e.metrics.MySQLUp.Set(1)
+
 	ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "connection")
 
 	versionNum := getMySQLVersion(e.db)
@@ -121,8 +96,8 @@ func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
 			scrapeTime := time.Now()
 			if err := scraper.Scrape(e.db, ch); err != nil {
 				log.Errorln("Error scraping for "+label+":", err)
-				e.stats.ScrapeErrors.WithLabelValues(label).Inc()
-				e.stats.Error.Set(1)
+				e.metrics.ScrapeErrors.WithLabelValues(label).Inc()
+				e.metrics.Error.Set(1)
 			}
 			ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), label)
 		}(scraper)
@@ -146,18 +121,21 @@ func getMySQLVersion(db *sql.DB) float64 {
 	return versionNum
 }
 
-type Stats struct {
+// Metrics represents exporter metrics whose values can be carried between HTTP requests.
+type Metrics struct {
 	TotalScrapes prometheus.Counter
 	ScrapeErrors *prometheus.CounterVec
 	Error        prometheus.Gauge
+	MySQLUp      prometheus.Gauge
 }
 
-func NewStats(resolution string) *Stats {
+// NewMetrics creates a new Metrics instance.
+func NewMetrics(resolution string) Metrics {
 	subsystem := exporter
 	if resolution != "" {
 		subsystem = exporter + "_" + resolution
 	}
-	return &Stats{
+	return Metrics{
 		TotalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
 			Namespace: namespace,
 			Subsystem: subsystem,
@@ -176,5 +154,10 @@ func NewStats(resolution string) *Stats {
 			Name:      "last_scrape_error",
 			Help:      "Whether the last scrape of metrics from MySQL resulted in an error (1 for error, 0 for success).",
 		}),
+		MySQLUp: prometheus.NewGauge(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "up",
+			Help:      "Whether the MySQL server is up.",
+		}),
 	}
 }
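
The Describe rewrite above is the core of this change. prometheus.Registry invokes a collector's Describe at registration time, and newHandler (in mysqld_exporter.go below) builds a fresh registry for every HTTP request; since the old Describe ran a full Collect, each request hit MySQL twice: once during MustRegister and once during the actual Gather. A minimal sketch of that registration-time call (not part of this commit; noisyCollector and demo_up are invented names):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// noisyCollector stands in for the old Exporter and prints whenever the
// registry invokes one of its methods (the places where MySQL was scraped).
type noisyCollector struct {
	up prometheus.Gauge
}

func (c *noisyCollector) Describe(ch chan<- *prometheus.Desc) {
	fmt.Println("Describe called") // the old exporter scraped MySQL here...
	ch <- c.up.Desc()
}

func (c *noisyCollector) Collect(ch chan<- prometheus.Metric) {
	fmt.Println("Collect called") // ...and again here, on every scrape
	c.up.Set(1)
	ch <- c.up
}

func main() {
	reg := prometheus.NewRegistry()
	c := &noisyCollector{up: prometheus.NewGauge(prometheus.GaugeOpts{Name: "demo_up"})}
	reg.MustRegister(c) // prints "Describe called"
	reg.Gather()        // prints "Collect called"
}

With the static Describe above, registration no longer touches the database at all, which is what the commit title means by scraping the MySQL database only once.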

collector/exporter_test.go (+4 -2)

@@ -25,8 +25,10 @@ func TestExporter(t *testing.T) {
 
 	exporter := New(
 		db,
-		[]Scraper{ScrapeGlobalStatus{}},
-		NewStats(""),
+		NewMetrics(""),
+		[]Scraper{
+			ScrapeGlobalStatus{},
+		},
 	)
 
 	convey.Convey("Metrics describing", t, func() {
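
Because Describe no longer connects to MySQL, it can be exercised without a database at all. A hypothetical companion test (not part of this commit; TestDescribeWithoutDB is an invented name) sketching that property:

func TestDescribeWithoutDB(t *testing.T) {
	// A nil DB and nil scrapers are fine here: Describe only forwards
	// the static descriptors held in Metrics.
	e := New(nil, NewMetrics(""), nil)
	ch := make(chan *prometheus.Desc, 8) // buffered; Describe sends 4 descriptors
	e.Describe(ch)
	close(ch)
	var n int
	for range ch {
		n++
	}
	if n != 4 { // total scrapes, scrape errors, last scrape error, up
		t.Fatalf("expected 4 descriptors, got %d", n)
	}
}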

mysqld_exporter.go (+5 -5)

@@ -206,7 +206,7 @@ func init() {
 	prometheus.MustRegister(version.NewCollector("mysqld_exporter"))
 }
 
-func newHandler(cfg *webAuth, db *sql.DB, scrapers []collector.Scraper, stats *collector.Stats, defaultGatherer bool) http.HandlerFunc {
+func newHandler(cfg *webAuth, db *sql.DB, metrics collector.Metrics, scrapers []collector.Scraper, defaultGatherer bool) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		filteredScrapers := scrapers
 		params := r.URL.Query()["collect[]"]
@@ -238,7 +238,7 @@ func newHandler(cfg *webAuth, db *sql.DB, scrapers []collector.Scraper, stats *collector.Stats, defaultGatherer bool) http.HandlerFunc {
 		}
 
 		registry := prometheus.NewRegistry()
-		registry.MustRegister(collector.New(db, filteredScrapers, stats))
+		registry.MustRegister(collector.New(db, metrics, filteredScrapers))
 
 		gatherers := prometheus.Gatherers{}
 		if defaultGatherer {
@@ -371,9 +371,9 @@ func main() {
 
 	// Defines what to scrape in each resolution.
 	hr, mr, lr := enabledScrapers(scraperFlags)
-	mux.Handle(*metricPath+"-hr", newHandler(cfg, db, hr, collector.NewStats("hr"), true))
-	mux.Handle(*metricPath+"-mr", newHandler(cfg, db, mr, collector.NewStats("mr"), false))
-	mux.Handle(*metricPath+"-lr", newHandler(cfg, db, lr, collector.NewStats("lr"), false))
+	mux.Handle(*metricPath+"-hr", newHandler(cfg, db, collector.NewMetrics("hr"), hr, true))
+	mux.Handle(*metricPath+"-mr", newHandler(cfg, db, collector.NewMetrics("mr"), mr, false))
+	mux.Handle(*metricPath+"-lr", newHandler(cfg, db, collector.NewMetrics("lr"), lr, false))
 
 	// Log which scrapers are enabled.
 	if len(hr) > 0 {
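
The rewiring in main is what lets metric state survive between requests: each resolution endpoint gets one Metrics value, created once at startup and captured by its handler, so counters such as TotalScrapes and ScrapeErrors accumulate across scrapes even though the registry and Exporter are rebuilt per request. A simplified sketch of that lifecycle (buildHandler is a hypothetical stand-in for newHandler, with web auth and collect[] filtering omitted; the promhttp wiring is assumed rather than taken from this commit):

package main

import (
	"database/sql"
	"net/http"

	"github.com/percona/mysqld_exporter/collector"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// buildHandler is a stripped-down stand-in for newHandler: no web auth,
// no collect[] filtering. metrics is captured by the closure and outlives
// individual requests; the registry and Exporter do not.
func buildHandler(db *sql.DB, metrics collector.Metrics, scrapers []collector.Scraper) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		registry := prometheus.NewRegistry()
		// MustRegister calls Describe, which is now static and cheap:
		// no MySQL round-trip before the actual scrape below.
		registry.MustRegister(collector.New(db, metrics, scrapers))
		promhttp.HandlerFor(registry, promhttp.HandlerOpts{}).ServeHTTP(w, r)
	}
}

func main() {
	var db *sql.DB // in the real exporter this comes from sql.Open on the MySQL DSN
	http.Handle("/metrics-hr", buildHandler(db, collector.NewMetrics("hr"), nil))
	http.ListenAndServe(":9104", nil)
}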
