@@ -41,22 +41,6 @@ const infoSchemaProcesslistQuery = `
4141		  GROUP BY user, host, command, state 
4242	` 
4343
44- // Tunable flags. 
45- var  (
46- 	processlistMinTime  =  kingpin .Flag (
47- 		"collect.info_schema.processlist.min_time" ,
48- 		"Minimum time a thread must be in each state to be counted" ,
49- 	).Default ("0" ).Int ()
50- 	processesByUserFlag  =  kingpin .Flag (
51- 		"collect.info_schema.processlist.processes_by_user" ,
52- 		"Enable collecting the number of processes by user" ,
53- 	).Default ("true" ).Bool ()
54- 	processesByHostFlag  =  kingpin .Flag (
55- 		"collect.info_schema.processlist.processes_by_host" ,
56- 		"Enable collecting the number of processes by host" ,
57- 	).Default ("true" ).Bool ()
58- )
59- 
6044// Metric descriptors. 
6145var  (
6246	processlistCountDesc  =  prometheus .NewDesc (
7862)
7963
// ScrapeProcesslist collects from `information_schema.processlist`.
type ScrapeProcesslist struct {
	// ProcessListMinTime is the minimum time a thread must be in each
	// state to be counted (units not shown here — presumably seconds;
	// confirm against the processlist query). Set by the
	// collect.info_schema.processlist.min_time flag, default 0.
	ProcessListMinTime int
	// ProcessesByUserFlag enables the per-user process-count metric.
	// Set by collect.info_schema.processlist.processes_by_user, default true.
	ProcessesByUserFlag bool
	// ProcessesByHostFlag enables the per-host process-count metric.
	// Set by collect.info_schema.processlist.processes_by_host, default true.
	ProcessesByHostFlag bool
}
8270
8371// Name of the Scraper. Should be unique. 
8472func  (ScrapeProcesslist ) Name () string  {
@@ -95,11 +83,27 @@ func (ScrapeProcesslist) Version() float64 {
9583	return  5.1 
9684}
9785
86+ // RegisterFlags adds flags to configure the Scraper. 
87+ func  (s  * ScrapeProcesslist ) RegisterFlags (application  * kingpin.Application ) {
88+ 	application .Flag (
89+ 		"collect.info_schema.processlist.min_time" ,
90+ 		"Minimum time a thread must be in each state to be counted" ,
91+ 	).Default ("0" ).IntVar (& s .ProcessListMinTime )
92+ 	application .Flag (
93+ 		"collect.info_schema.processlist.processes_by_user" ,
94+ 		"Enable collecting the number of processes by user" ,
95+ 	).Default ("true" ).BoolVar (& s .ProcessesByUserFlag )
96+ 	application .Flag (
97+ 		"collect.info_schema.processlist.processes_by_host" ,
98+ 		"Enable collecting the number of processes by host" ,
99+ 	).Default ("true" ).BoolVar (& s .ProcessesByHostFlag )
100+ }
101+ 
98102// Scrape collects data from database connection and sends it over channel as prometheus metric. 
99- func  (ScrapeProcesslist ) Scrape (ctx  context.Context , instance  * instance , ch  chan <-  prometheus.Metric , logger  * slog.Logger ) error  {
103+ func  (s   ScrapeProcesslist ) Scrape (ctx  context.Context , instance  * instance , ch  chan <-  prometheus.Metric , logger  * slog.Logger ) error  {
100104	processQuery  :=  fmt .Sprintf (
101105		infoSchemaProcesslistQuery ,
102- 		* processlistMinTime ,
106+ 		s . ProcessListMinTime ,
103107	)
104108	db  :=  instance .getDB ()
105109	processlistRows , err  :=  db .QueryContext (ctx , processQuery )
@@ -162,12 +166,13 @@ func (ScrapeProcesslist) Scrape(ctx context.Context, instance *instance, ch chan
162166		}
163167	}
164168
165- 	if  * processesByHostFlag  {
169+ 	if  s . ProcessesByHostFlag  {
166170		for  _ , host  :=  range  sortedMapKeys (stateHostCounts ) {
167171			ch  <-  prometheus .MustNewConstMetric (processesByHostDesc , prometheus .GaugeValue , float64 (stateHostCounts [host ]), host )
168172		}
169173	}
170- 	if  * processesByUserFlag  {
174+ 
175+ 	if  s .ProcessesByUserFlag  {
171176		for  _ , user  :=  range  sortedMapKeys (stateUserCounts ) {
172177			ch  <-  prometheus .MustNewConstMetric (processesByUserDesc , prometheus .GaugeValue , float64 (stateUserCounts [user ]), user )
173178		}
0 commit comments