@@ -42,22 +42,6 @@ const infoSchemaProcesslistQuery = `
 	GROUP BY user, SUBSTRING_INDEX(host, ':', 1), command, state
 	`
 
-// Tunable flags.
-var (
-	processlistMinTime = kingpin.Flag(
-		"collect.info_schema.processlist.min_time",
-		"Minimum time a thread must be in each state to be counted",
-	).Default("0").Int()
-	processesByUserFlag = kingpin.Flag(
-		"collect.info_schema.processlist.processes_by_user",
-		"Enable collecting the number of processes by user",
-	).Default("true").Bool()
-	processesByHostFlag = kingpin.Flag(
-		"collect.info_schema.processlist.processes_by_host",
-		"Enable collecting the number of processes by host",
-	).Default("true").Bool()
-)
-
 // Metric descriptors.
 var (
 	processlistCountDesc = prometheus.NewDesc(
@@ -79,7 +63,11 @@
 )
 
 // ScrapeProcesslist collects from `information_schema.processlist`.
-type ScrapeProcesslist struct{}
+type ScrapeProcesslist struct {
+	ProcessListMinTime  int
+	ProcessesByUserFlag bool
+	ProcessesByHostFlag bool
+}
 
 // Name of the Scraper. Should be unique.
 func (ScrapeProcesslist) Name() string {
@@ -96,11 +84,27 @@ func (ScrapeProcesslist) Version() float64 {
 	return 5.1
 }
 
+// RegisterFlags adds flags to configure the Scraper.
+func (s *ScrapeProcesslist) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.info_schema.processlist.min_time",
+		"Minimum time a thread must be in each state to be counted",
+	).Default("0").IntVar(&s.ProcessListMinTime)
+	application.Flag(
+		"collect.info_schema.processlist.processes_by_user",
+		"Enable collecting the number of processes by user",
+	).Default("true").BoolVar(&s.ProcessesByUserFlag)
+	application.Flag(
+		"collect.info_schema.processlist.processes_by_host",
+		"Enable collecting the number of processes by host",
+	).Default("true").BoolVar(&s.ProcessesByHostFlag)
+}
+
 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapeProcesslist) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
+func (s ScrapeProcesslist) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
 	processQuery := fmt.Sprintf(
 		infoSchemaProcesslistQuery,
-		*processlistMinTime,
+		s.ProcessListMinTime,
 	)
 	processlistRows, err := db.QueryContext(ctx, processQuery)
 	if err != nil {
@@ -162,12 +166,13 @@ func (ScrapeProcesslist) Scrape(ctx context.Context, db *sql.DB, ch chan<- prome
 		}
 	}
 
-	if *processesByHostFlag {
+	if s.ProcessesByHostFlag {
 		for _, host := range sortedMapKeys(stateHostCounts) {
 			ch <- prometheus.MustNewConstMetric(processesByHostDesc, prometheus.GaugeValue, float64(stateHostCounts[host]), host)
 		}
 	}
-	if *processesByUserFlag {
+
+	if s.ProcessesByUserFlag {
 		for _, user := range sortedMapKeys(stateUserCounts) {
 			ch <- prometheus.MustNewConstMetric(processesByUserDesc, prometheus.GaugeValue, float64(stateUserCounts[user]), user)
 		}
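
For reference, a minimal sketch (outside the diff) of how a scraper that exposes RegisterFlags can be wired to a kingpin.Application and have its fields populated at parse time. The processlistFlags struct and the example program below are illustrative stand-ins that mirror the fields added in this change; they are not part of the exporter itself.

// Sketch: binding per-scraper flags to struct fields via kingpin, then reading
// them back after parsing. Assumes the gopkg.in/alecthomas/kingpin.v2 package.
package main

import (
	"fmt"
	"os"

	"gopkg.in/alecthomas/kingpin.v2"
)

// processlistFlags mirrors the flag-backed fields added to ScrapeProcesslist.
type processlistFlags struct {
	ProcessListMinTime  int
	ProcessesByUserFlag bool
	ProcessesByHostFlag bool
}

// RegisterFlags binds the struct fields to command-line flags, following the
// pattern introduced in the diff above.
func (s *processlistFlags) RegisterFlags(application *kingpin.Application) {
	application.Flag(
		"collect.info_schema.processlist.min_time",
		"Minimum time a thread must be in each state to be counted",
	).Default("0").IntVar(&s.ProcessListMinTime)
	application.Flag(
		"collect.info_schema.processlist.processes_by_user",
		"Enable collecting the number of processes by user",
	).Default("true").BoolVar(&s.ProcessesByUserFlag)
	application.Flag(
		"collect.info_schema.processlist.processes_by_host",
		"Enable collecting the number of processes by host",
	).Default("true").BoolVar(&s.ProcessesByHostFlag)
}

func main() {
	app := kingpin.New("example", "flag registration sketch")
	s := &processlistFlags{}
	s.RegisterFlags(app)

	// Parse the real command line; the Default values apply when flags are omitted.
	kingpin.MustParse(app.Parse(os.Args[1:]))
	fmt.Println(s.ProcessListMinTime, s.ProcessesByUserFlag, s.ProcessesByHostFlag)
}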