forked from iamseth/oracledb_exporter
-
Notifications
You must be signed in to change notification settings - Fork 0
/
metric.go
351 lines (321 loc) · 11.1 KB
/
metric.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
package main
import (
"bytes"
"context"
"crypto/sha256"
"database/sql"
"errors"
"fmt"
"hash"
"io"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/BurntSushi/toml"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
)
// Metrics to scrape. Use external file (default-metrics.toml and custom if provided)
var (
	// metricsLock guards metricsToScrap, which reloadMetrics rewrites and
	// the scrape path reads.
	metricsLock *sync.RWMutex = new(sync.RWMutex)
	// metricsToScrap holds the currently loaded metric definitions.
	metricsToScrap Metrics
	// hashMap remembers the last-seen SHA-256 digest of each metrics
	// definition file so checkIfMetricsChanged can detect modifications.
	hashMap map[string][]byte = make(map[string][]byte)
)
// Metric describes a single scrapeable metric definition, decoded from a
// TOML metrics file.
type Metric struct {
	// Context becomes the subsystem part of the Prometheus metric name.
	Context string
	// Labels lists the result columns exported as label values.
	Labels []string
	// MetricsDesc maps a result column to its help text; one Prometheus
	// metric is emitted per entry.
	MetricsDesc map[string]string
	// MetricsType maps a lowercase column name to "gauge", "counter" or
	// "histogram"; unspecified columns default to gauge.
	MetricsType map[string]string
	// MetricsBuckets maps a histogram column to its bucket-field -> "le"
	// upper-limit definitions.
	MetricsBuckets map[string]map[string]string
	// FieldToAppend, when non-empty, names the column whose (cleaned)
	// content is appended to the metric name instead of exporting labels.
	FieldToAppend string
	// Request is the SQL query executed to produce the metric rows.
	Request string
	// IgnoreZeroResult suppresses the "no metrics found" error when the
	// query yields no usable rows.
	IgnoreZeroResult bool
}
// Metrics is the top-level container used to decode a list of metric
// definitions from a TOML metrics file (see reloadMetrics).
type Metrics struct {
	Metric []Metric
}
// GetMetricType resolves the Prometheus value type declared for a metric.
//
// metricType is the metric column name (matched case-insensitively);
// metricsType maps lowercase column names to a declared type string. A
// column with no declared type defaults to a gauge; an unrecognized type
// string yields an error.
func GetMetricType(metricType string, metricsType map[string]string) (prometheus.ValueType, error) {
	strType, ok := metricsType[strings.ToLower(metricType)]
	if !ok {
		// No explicit type declared: default to gauge.
		return prometheus.GaugeValue, nil
	}
	// A switch avoids rebuilding the lookup map on every call.
	// "histogram" is reported as untyped here because histograms are emitted
	// through MustNewConstHistogram, not as a plain const metric.
	switch strings.ToLower(strType) {
	case "gauge":
		return prometheus.GaugeValue, nil
	case "counter":
		return prometheus.CounterValue, nil
	case "histogram":
		return prometheus.UntypedValue, nil
	default:
		return 0, errors.New("error while getting prometheus type: " + strings.ToLower(strType))
	}
}
// ScrapeMetric scrapes one metric definition by forwarding the fields of the
// Metric struct to ScrapeGenericValues.
func ScrapeMetric(db *sql.DB, ch chan<- prometheus.Metric, metricDefinition Metric, logger *logrus.Logger) error {
	logger.Debug("Calling function ScrapeGenericValues()")
	m := metricDefinition
	return ScrapeGenericValues(
		db, ch,
		m.Context, m.Labels,
		m.MetricsDesc, m.MetricsType, m.MetricsBuckets,
		m.FieldToAppend, m.IgnoreZeroResult,
		m.Request, logger,
	)
}
// ScrapeGenericValues runs the metric's SQL request against db and emits one
// Prometheus metric per (row, metric column) pair on ch.
//
// context becomes the metric subsystem; labels names the columns exported as
// label values. metricsDesc maps a metric column to its help text,
// metricsType maps a lowercase column to its declared type, and
// metricsBuckets maps a histogram column to its bucket-field -> "le" limits.
// When fieldToAppend is non-empty, the cleaned content of that column is
// appended to the metric name and no labels are attached. Unless
// ignoreZeroResult is set, producing no metric at all is reported as an
// error.
func ScrapeGenericValues(db *sql.DB, ch chan<- prometheus.Metric, context string, labels []string,
	metricsDesc map[string]string, metricsType map[string]string, metricsBuckets map[string]map[string]string, fieldToAppend string, ignoreZeroResult bool, request string, logger *logrus.Logger) error {
	metricsCount := 0
	genericParser := func(row map[string]string) error {
		// Collect label values in the declared label order.
		labelsValues := make([]string, 0, len(labels))
		for _, label := range labels {
			labelsValues = append(labelsValues, row[label])
		}
		// Construct Prometheus values to send back.
		for metric, metricHelp := range metricsDesc {
			value, err := strconv.ParseFloat(strings.TrimSpace(row[metric]), 64)
			// If not a float, skip current metric.
			if err != nil {
				logger.Error("Unable to convert current value to float (metric=" + metric +
					",metricHelp=" + metricHelp + ",value=<" + row[metric] + ">)")
				continue
			}
			logger.Debug("Query result looks like: ", value)
			// Without fieldToAppend the metric keeps its column name and
			// carries the configured labels; otherwise the row's field content
			// becomes part of the name and no labels are attached.
			var desc *prometheus.Desc
			var descLabelValues []string
			if fieldToAppend == "" {
				desc = prometheus.NewDesc(
					prometheus.BuildFQName(namespace, context, metric),
					metricHelp,
					labels, nil,
				)
				descLabelValues = labelsValues
			} else {
				desc = prometheus.NewDesc(
					prometheus.BuildFQName(namespace, context, cleanName(row[fieldToAppend])),
					metricHelp,
					nil, nil,
				)
			}
			if metricsType[strings.ToLower(metric)] == "histogram" {
				count, buckets, err := parseHistogramRow(row, metric, metricHelp, metricsBuckets[metric], logger)
				if err != nil {
					// Count column was unparsable; already logged by the helper.
					continue
				}
				ch <- prometheus.MustNewConstHistogram(desc, count, value, buckets, descLabelValues...)
			} else {
				vtype, err := GetMetricType(metric, metricsType)
				if err != nil {
					logger.Error(err)
					continue
				}
				ch <- prometheus.MustNewConstMetric(desc, vtype, value, descLabelValues...)
			}
			metricsCount++
		}
		return nil
	}
	logger.Debug("Calling function GeneratePrometheusMetrics()")
	err := GeneratePrometheusMetrics(db, genericParser, request, logger)
	logger.Debug("ScrapeGenericValues() - metricsCount: ", metricsCount)
	if err != nil {
		return err
	}
	if !ignoreZeroResult && metricsCount == 0 {
		return errors.New("no metrics found while parsing")
	}
	return err
}

// parseHistogramRow extracts a histogram's observation count and cumulative
// buckets from a result row (previously duplicated inline in both branches
// of ScrapeGenericValues). The mandatory "count" column must parse as an
// unsigned integer; a failure there is logged and returned as an error.
// Individual bucket fields that fail to parse are logged and skipped.
func parseHistogramRow(row map[string]string, metric, metricHelp string, bucketDefs map[string]string, logger *logrus.Logger) (uint64, map[float64]uint64, error) {
	count, err := strconv.ParseUint(strings.TrimSpace(row["count"]), 10, 64)
	if err != nil {
		logger.Error("Unable to convert count value to int (metric=" + metric +
			",metricHelp=" + metricHelp + ",value=<" + row["count"] + ">)")
		return 0, nil, err
	}
	buckets := make(map[float64]uint64)
	for field, le := range bucketDefs {
		lelimit, err := strconv.ParseFloat(strings.TrimSpace(le), 64)
		if err != nil {
			logger.Error("Unable to convert bucket limit value to float (metric=" + metric +
				",metricHelp=" + metricHelp + ",bucketlimit=<" + le + ">)")
			continue
		}
		counter, err := strconv.ParseUint(strings.TrimSpace(row[field]), 10, 64)
		if err != nil {
			logger.Error("Unable to convert ", field, " value to int (metric="+metric+
				",metricHelp="+metricHelp+",value=<"+row[field]+">)")
			continue
		}
		buckets[lelimit] = counter
	}
	return count, buckets, nil
}
// inspired by https://kylewbanks.com/blog/query-result-to-map-in-golang
// GeneratePrometheusMetrics runs query against db (bounded by the configured
// query timeout) and calls parse once per result row with a map of lowercase
// column name -> cell value formatted as a string.
func GeneratePrometheusMetrics(db *sql.DB, parse func(row map[string]string) error, query string, logger *logrus.Logger) error {
	// Add a timeout so a hung Oracle query cannot block the scrape forever.
	timeout, err := strconv.Atoi(*queryTimeout)
	if err != nil {
		logger.Error("error while converting timeout option value: ", err)
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
	defer cancel()
	rows, err := db.QueryContext(ctx, query)
	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
		return errors.New("oracle query timed out")
	}
	if err != nil {
		return err
	}
	// Defer the close immediately: the previous version deferred only after
	// rows.Columns(), leaking the rows handle when Columns() failed.
	defer rows.Close()
	cols, err := rows.Columns()
	if err != nil {
		return err
	}
	for rows.Next() {
		// One scan destination per column; columnPointers lets Scan fill
		// the columns slice in place.
		columns := make([]interface{}, len(cols))
		columnPointers := make([]interface{}, len(cols))
		for i := range columns {
			columnPointers[i] = &columns[i]
		}
		if err := rows.Scan(columnPointers...); err != nil {
			return err
		}
		// Map each (lowercased) column name to the stringified cell value.
		m := make(map[string]string, len(cols))
		for i, colName := range cols {
			val := columnPointers[i].(*interface{})
			m[strings.ToLower(colName)] = fmt.Sprintf("%v", *val)
		}
		// Call function to parse row.
		if err := parse(m); err != nil {
			return err
		}
	}
	// Surface any error hit during iteration (previously silently dropped).
	return rows.Err()
}
// checkIfMetricsChanged reports whether any metrics definition file (the
// default file or any comma-separated custom file) differs from the hash
// recorded in hashMap. The first changed file has its recorded hash updated
// and true is returned immediately; a hashing failure aborts with false.
func checkIfMetricsChanged(logger *logrus.Logger) bool {
	defaultHash := sha256.New()
	if err := hashFile(defaultHash, *defaultFileMetrics); err != nil {
		logger.Errorf("Unable to hash file [%s], err = %s", *defaultFileMetrics, err)
		return false
	}
	if sum := defaultHash.Sum(nil); !bytes.Equal(hashMap[*defaultFileMetrics], sum) {
		logger.Info(*defaultFileMetrics, " has been changed. Reloading metrics...")
		hashMap[*defaultFileMetrics] = sum
		return true
	}
	for _, customFile := range strings.Split(*customMetrics, ",") {
		if customFile == "" {
			continue
		}
		logger.Debug("Checking modifications in following metrics definition file:", customFile)
		customHash := sha256.New()
		if err := hashFile(customHash, customFile); err != nil {
			logger.Errorf("Unable to hash file [%s], error = %s", customFile, err)
			return false
		}
		// If any of the files has been changed, reload metrics.
		if sum := customHash.Sum(nil); !bytes.Equal(hashMap[customFile], sum) {
			logger.Info(customFile, " has been changed. Reloading metrics...")
			hashMap[customFile] = sum
			return true
		}
	}
	return false
}
// reloadMetrics re-reads the default metrics file plus any comma-separated
// custom metrics files, records each file's SHA-256 in hashMap for change
// detection, and swaps the result into metricsToScrap under metricsLock.
func reloadMetrics(logger *logrus.Logger) {
	// Build the new definition set in a local first so a default-file load
	// failure never leaves metricsToScrap half-populated.
	var metrics Metrics
	// Load default metrics.
	if _, err := toml.DecodeFile(*defaultFileMetrics, &metrics); err != nil {
		logger.Errorf("loading [%s] error = %s", *defaultFileMetrics, err)
		return
	}
	logger.Info("Successfully loaded default metrics from: " + *defaultFileMetrics)
	h := sha256.New()
	if err := hashFile(h, *defaultFileMetrics); err != nil {
		logger.Errorf("Unable to hash file [%s], err = %s", *defaultFileMetrics, err)
	}
	hashMap[*defaultFileMetrics] = h.Sum(nil)
	// If custom metrics are configured, load each file and append its metrics.
	if *customMetrics == "" {
		logger.Info("No custom metrics defined.")
	} else {
		for _, customFile := range strings.Split(*customMetrics, ",") {
			if customFile == "" {
				continue
			}
			h := sha256.New()
			if err := hashFile(h, customFile); err != nil {
				logger.Errorf("Unable to hash file [%s], err = %s", customFile, err)
			}
			hashMap[customFile] = h.Sum(nil)
			var additionalMetrics Metrics
			if _, err := toml.DecodeFile(customFile, &additionalMetrics); err != nil {
				logger.Errorf("loading [%s] error = %s", customFile, err)
				continue
			}
			logger.Info("Successfully loaded custom metrics from: " + customFile)
			metrics.Metric = append(metrics.Metric, additionalMetrics.Metric...)
		}
	}
	metricsLock.Lock()
	defer metricsLock.Unlock()
	metricsToScrap = metrics
}
// cleanNameReplacer rewrites, in a single pass, the characters Oracle names
// may contain that we do not want in a Prometheus metric name: spaces become
// underscores; parentheses, forward slashes and asterisks are dropped.
var cleanNameReplacer = strings.NewReplacer(
	" ", "_",
	"(", "",
	")", "",
	"/", "",
	"*", "",
)

// Oracle gives us some ugly names back. This function cleans things up for
// Prometheus, returning a lowercase, sanitized name component. Using one
// Replacer pass avoids the five intermediate strings the previous chain of
// strings.Replace calls allocated.
func cleanName(s string) string {
	return strings.ToLower(cleanNameReplacer.Replace(s))
}
func hashFile(h hash.Hash, fn string) error {
f, err := os.Open(fn)
if err != nil {
return err
}
defer f.Close()
if _, err := io.Copy(h, f); err != nil {
return err
}
return nil
}