@@ -2,14 +2,17 @@ package main
 
 import (
 	"crypto/tls"
+	"database/sql"
 	"flag"
 	"fmt"
 	"io/ioutil"
 	"net/http"
 	"os"
 	"path"
 	"strings"
+	"time"
 
+	_ "github.com/go-sql-driver/mysql"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/prometheus/common/log"
@@ -20,6 +23,13 @@ import (
 	"github.com/percona/mysqld_exporter/collector"
 )
 
+// System variable params formatting.
+// See: https://github.com/go-sql-driver/mysql#system-variables
+const (
+	sessionSettingsParam = `log_slow_filter=%27tmp_table_on_disk,filesort_on_disk%27`
+	timeoutParam         = `lock_wait_timeout=%d`
+)
+
 var (
 	showVersion = flag.Bool(
 		"version", false,
@@ -49,6 +59,35 @@ var (
 		"web.ssl-key-file", "",
 		"Path to SSL key file.",
 	)
+	exporterLockTimeout = flag.Int(
+		"exporter.lock_wait_timeout", 2,
+		"Set a lock_wait_timeout on the connection to avoid long metadata locking.",
+	)
+	exporterLogSlowFilter = flag.Bool(
+		"exporter.log_slow_filter", false,
+		"Add a log_slow_filter to avoid slow query logging of scrapes. NOTE: Not supported by Oracle MySQL.",
+	)
+	exporterGlobalConnPool = flag.Bool(
+		"exporter.global-conn-pool", true,
+		"Use global connection pool instead of creating new pool for each http request.",
+	)
+	exporterMaxOpenConns = flag.Int(
+		"exporter.max-open-conns", 3,
+		"Maximum number of open connections to the database. https://golang.org/pkg/database/sql/#DB.SetMaxOpenConns",
+	)
+	exporterMaxIdleConns = flag.Int(
+		"exporter.max-idle-conns", 3,
+		"Maximum number of connections in the idle connection pool. https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns",
+	)
+	exporterConnMaxLifetime = flag.Duration(
+		"exporter.conn-max-lifetime", 60*time.Second,
+		"Maximum amount of time a connection may be reused. https://golang.org/pkg/database/sql/#DB.SetConnMaxLifetime",
+	)
+	collectAll = flag.Bool(
+		"collect.all", false,
+		"Collect all metrics.",
+	)
+
 	dsn string
 )
 
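
A note on the constants introduced above: go-sql-driver/mysql interprets unrecognised DSN parameters as MySQL system variables, and string-valued variables must be wrapped in single quotes, which have to appear percent-encoded as %27 inside a DSN. The minimal sketch below is not part of the change; it only shows what the two format constants expand to with the default flag values.

// Illustrative sketch: expansion of timeoutParam and sessionSettingsParam.
package main

import "fmt"

func main() {
	// Numeric system variables need no quoting.
	fmt.Printf("lock_wait_timeout=%d\n", 2) // default of -exporter.lock_wait_timeout

	// log_slow_filter takes a string value, so it is wrapped in %27, the
	// percent-encoded single quote the driver expects inside a DSN.
	fmt.Println("log_slow_filter=%27tmp_table_on_disk,filesort_on_disk%27")
}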
@@ -159,12 +198,11 @@ func init() {
 	prometheus.MustRegister(version.NewCollector("mysqld_exporter"))
 }
 
-func newHandler(cfg *webAuth, scrapers []collector.Scraper) http.HandlerFunc {
+func newHandler(cfg *webAuth, db *sql.DB, scrapers []collector.Scraper) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		filteredScrapers := scrapers
 		params := r.URL.Query()["collect[]"]
 		log.Debugln("collect query:", params)
-
 		if len(params) > 0 {
 			filters := make(map[string]bool)
 			for _, param := range params {
@@ -179,8 +217,20 @@ func newHandler(cfg *webAuth, scrapers []collector.Scraper) http.HandlerFunc {
 			}
 		}
 
+		// Copy db as local variable, so the pointer passed to newHandler doesn't get updated.
+		db := db
+		// If there is no global connection pool then create new.
+		var err error
+		if db == nil {
+			db, err = newDB(dsn)
+			if err != nil {
+				log.Fatalln("Error opening connection to database:", err)
+			}
+			defer db.Close()
+		}
+
 		registry := prometheus.NewRegistry()
-		registry.MustRegister(collector.New(dsn, filteredScrapers))
+		registry.MustRegister(collector.New(db, filteredScrapers))
 
 		gatherers := prometheus.Gatherers{
 			prometheus.DefaultGatherer,
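
The `db := db` line above is the subtle part of this hunk: the closure returned by newHandler shares its db parameter across all requests, so without the local re-declaration the per-request pool created when no global pool exists would be assigned to the captured variable and then closed by the deferred Close, leaving later requests with a closed pool. A minimal sketch of the shadowing pattern follows; it is not part of the change and uses a plain int pointer in place of the *sql.DB.

// Illustrative sketch of closure capture vs. shadowing.
package main

import "fmt"

func newHandler(db *int) func() {
	return func() {
		db := db // local copy; the captured parameter is never reassigned
		if db == nil {
			v := 42 // stands in for the per-request newDB(dsn) call
			db = &v
			// in the real handler the per-request pool is also closed here via defer
		}
		fmt.Println(*db)
	}
}

func main() {
	h := newHandler(nil)
	h() // prints 42; a fresh value is created for this call
	h() // prints 42 again; the captured parameter is still nil, so a fresh value is created each time
}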
@@ -236,6 +286,7 @@ func main() {
 	log.Infoln("Starting mysqld_exporter", version.Info())
 	log.Infoln("Build context", version.BuildContext())
 
+	// Get DSN.
 	dsn = os.Getenv("DATA_SOURCE_NAME")
 	if len(dsn) == 0 {
 		var err error
@@ -244,6 +295,30 @@ func main() {
 		}
 	}
 
+	// Setup extra params for the DSN, default to having a lock timeout.
+	dsnParams := []string{fmt.Sprintf(timeoutParam, *exporterLockTimeout)}
+	if *exporterLogSlowFilter {
+		dsnParams = append(dsnParams, sessionSettingsParam)
+	}
+
+	if strings.Contains(dsn, "?") {
+		dsn = dsn + "&"
+	} else {
+		dsn = dsn + "?"
+	}
+	dsn += strings.Join(dsnParams, "&")
+
+	// Open global connection pool if requested.
+	var db *sql.DB
+	var err error
+	if *exporterGlobalConnPool {
+		db, err = newDB(dsn)
+		if err != nil {
+			log.Fatalln("Error opening connection to database:", err)
+		}
+		defer db.Close()
+	}
+
 	cfg := &webAuth{}
 	httpAuth := os.Getenv("HTTP_AUTH")
 	if *webAuthFile != "" {
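
For reference, the DSN rewrite above only appends query parameters: it joins with "&" when the DSN already carries a query string and with "?" otherwise. A minimal sketch of that behaviour follows, using a hypothetical DSN; the credentials and host are made up and not part of the change.

// Illustrative sketch mirroring the parameter-append logic in main().
package main

import (
	"fmt"
	"strings"
)

func addParams(dsn string, params []string) string {
	if strings.Contains(dsn, "?") {
		dsn = dsn + "&"
	} else {
		dsn = dsn + "?"
	}
	return dsn + strings.Join(params, "&")
}

func main() {
	params := []string{"lock_wait_timeout=2"} // default; log_slow_filter is added only when the flag is set

	fmt.Println(addParams("user:pass@tcp(127.0.0.1:3306)/", params))
	// user:pass@tcp(127.0.0.1:3306)/?lock_wait_timeout=2

	fmt.Println(addParams("user:pass@tcp(127.0.0.1:3306)/?timeout=5s", params))
	// user:pass@tcp(127.0.0.1:3306)/?timeout=5s&lock_wait_timeout=2
}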
@@ -286,9 +361,9 @@ func main() {
 
 	// Defines what to scrape in each resolution.
 	hr, mr, lr := enabledScrapers(scraperFlags)
-	mux.Handle(*metricPath+"-hr", newHandler(cfg, hr))
-	mux.Handle(*metricPath+"-mr", newHandler(cfg, mr))
-	mux.Handle(*metricPath+"-lr", newHandler(cfg, lr))
+	mux.Handle(*metricPath+"-hr", newHandler(cfg, db, hr))
+	mux.Handle(*metricPath+"-mr", newHandler(cfg, db, mr))
+	mux.Handle(*metricPath+"-lr", newHandler(cfg, db, lr))
 
 	// Log which scrapers are enabled.
 	if len(hr) > 0 {
@@ -349,7 +424,7 @@ func main() {
 
 func enabledScrapers(scraperFlags map[collector.Scraper]*bool) (hr, mr, lr []collector.Scraper) {
 	for scraper, enabled := range scraperFlags {
-		if *enabled {
+		if *collectAll || *enabled {
 			if _, ok := scrapersHr[scraper]; ok {
 				hr = append(hr, scraper)
 			}
@@ -364,3 +439,16 @@ func enabledScrapers(scraperFlags map[collector.Scraper]*bool) (hr, mr, lr []col
 
 	return hr, mr, lr
 }
+
+func newDB(dsn string) (*sql.DB, error) {
+	// Validate DSN, and open connection pool.
+	db, err := sql.Open("mysql", dsn)
+	if err != nil {
+		return nil, err
+	}
+	db.SetMaxOpenConns(*exporterMaxOpenConns)
+	db.SetMaxIdleConns(*exporterMaxIdleConns)
+	db.SetConnMaxLifetime(*exporterConnMaxLifetime)
+
+	return db, nil
+}
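
One property of newDB worth noting: sql.Open does not establish a connection, it only parses the DSN and configures the pool, so a bad host or password surfaces on first use rather than at startup. Below is a minimal usage sketch, not part of the change; the placeholder DSN and the limits mirror the defaults defined above, and Ping is added only to show where the first real connection happens.

// Illustrative usage sketch of a pool configured like newDB, with an explicit Ping.
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Placeholder DSN; only its syntax is checked by sql.Open.
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/?lock_wait_timeout=2")
	if err != nil {
		log.Fatalln("invalid DSN:", err)
	}
	defer db.Close()

	db.SetMaxOpenConns(3)                   // -exporter.max-open-conns default
	db.SetMaxIdleConns(3)                   // -exporter.max-idle-conns default
	db.SetConnMaxLifetime(60 * time.Second) // -exporter.conn-max-lifetime default

	// The first real connection is made here; without it, errors show up on the first scrape.
	if err := db.Ping(); err != nil {
		log.Fatalln("cannot reach MySQL:", err)
	}
}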