"""docstring for packages."""
import time
import logging

from datetime import datetime
from multiprocessing import Process, Queue
from queue import Empty as EmptyQueueException

import tornado.ioloop
import tornado.web
from prometheus_client import CONTENT_TYPE_LATEST, Gauge, generate_latest, REGISTRY
from prometheus_api_client import PrometheusConnect, Metric

from configuration import Configuration
import model
import schedule

# Set up logging
_LOGGER = logging.getLogger(__name__)

METRICS_LIST = Configuration.metrics_list

# list of ModelPredictor Objects shared between processes
PREDICTOR_MODEL_LIST = list()

pc = PrometheusConnect(
    url=Configuration.prometheus_url,
    headers=Configuration.prom_connect_headers,
    disable_ssl=True,
)
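
# NOTE: disable_ssl=True skips TLS certificate verification on the Prometheus
# connection; this is only appropriate for in-cluster or development endpoints.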

for metric in METRICS_LIST:
    # Initialize a predictor for all metrics first
    metric_init = pc.get_current_metric_value(metric_name=metric)

    for unique_metric in metric_init:
        PREDICTOR_MODEL_LIST.append(
            model.MetricPredictor(
                unique_metric,
                rolling_data_window_size=Configuration.rolling_training_window_size,
            )
        )
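
# get_current_metric_value returns one time series per unique label set, so a
# single configured metric name can fan out into several predictors above.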

# A gauge set for the predicted values
GAUGE_DICT = dict()
for predictor in PREDICTOR_MODEL_LIST:
    unique_metric = predictor.metric
    label_list = list(unique_metric.label_config.keys())
    label_list.append("value_type")
    if unique_metric.metric_name not in GAUGE_DICT:
        GAUGE_DICT[unique_metric.metric_name] = Gauge(
            unique_metric.metric_name + "_" + predictor.model_name,
            predictor.model_description,
            label_list,
        )
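
# Illustrative naming (hypothetical values): a configured metric
# "http_requests_total" served by a model named "prophet" would be exposed as
# the gauge "http_requests_total_prophet", carrying the original labels plus a
# "value_type" label for each prediction column and for the anomaly flag.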


class MainHandler(tornado.web.RequestHandler):
    """Tornado web request handler."""

    def initialize(self, data_queue):
        """Check if new predicted values are available in the queue before the get request."""
        try:
            model_list = data_queue.get_nowait()
            self.settings["model_list"] = model_list
        except EmptyQueueException:
            pass
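
    # get_nowait() keeps initialize() non-blocking: if no freshly trained
    # models have been queued since the last request, the previously stored
    # model list in self.settings is reused.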

    async def get(self):
        """Fetch and publish metric values asynchronously."""
        # update metric value on every request and publish the metric
        for predictor_model in self.settings["model_list"]:
            # get the current metric value so that it can be compared with the
            # predicted values
            current_metric_value = Metric(
                pc.get_current_metric_value(
                    metric_name=predictor_model.metric.metric_name,
                    label_config=predictor_model.metric.label_config,
                )[0]
            )
            metric_name = predictor_model.metric.metric_name
            prediction = predictor_model.predict_value(datetime.now())

            # Check for all the columns available in the prediction
            # and publish the values for each of them
            for column_name in list(prediction.columns):
                GAUGE_DICT[metric_name].labels(
                    **predictor_model.metric.label_config, value_type=column_name
                ).set(prediction[column_name][0])

            # Calculate for an anomaly (can be different for different models):
            # the current value is anomalous if it falls outside the predicted
            # [yhat_lower, yhat_upper] uncertainty band.
            anomaly = 1
            if (
                current_metric_value.metric_values["y"][0] < prediction["yhat_upper"][0]
            ) and (
                current_metric_value.metric_values["y"][0] > prediction["yhat_lower"][0]
            ):
                anomaly = 0

            # create a new time series that has value_type=anomaly
            # this value is 1 if an anomaly is found 0 if not
            GAUGE_DICT[metric_name].labels(
                **predictor_model.metric.label_config, value_type="anomaly"
            ).set(anomaly)

        self.write(generate_latest(REGISTRY).decode("utf-8"))
        # Serve the standard Prometheus exposition format content type
        self.set_header("Content-Type", CONTENT_TYPE_LATEST)
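
# A Prometheus server scraping this endpoint can alert on the anomaly series,
# e.g. with a PromQL expression over the hypothetical gauge named above:
#   http_requests_total_prophet{value_type="anomaly"} == 1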


def make_app(data_queue):
    """Initialize the tornado web app."""
    _LOGGER.info("Initializing Tornado Web App")
    return tornado.web.Application(
        [
            (r"/metrics", MainHandler, dict(data_queue=data_queue)),
            (r"/", MainHandler, dict(data_queue=data_queue)),
        ]
    )
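
# Both "/" and "/metrics" are wired to the same handler, so a Prometheus
# scrape config can point at either path.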


def train_model(initial_run=False, data_queue=None):
    """Train the machine learning model."""
    for predictor_model in PREDICTOR_MODEL_LIST:
        metric_to_predict = predictor_model.metric

        # On the initial run, train on the full rolling window; afterwards
        # only the newest chunk of data is fetched.
        data_start_time = datetime.now() - Configuration.metric_chunk_size
        if initial_run:
            data_start_time = datetime.now() - Configuration.rolling_training_window_size

        # Download new metric data from prometheus
        new_metric_data = pc.get_metric_range_data(
            metric_name=metric_to_predict.metric_name,
            label_config=metric_to_predict.label_config,
            start_time=data_start_time,
            end_time=datetime.now(),
        )[0]

        # Train the new model
        start_time = datetime.now()
        predictor_model.train(new_metric_data, Configuration.retraining_interval_minutes)

        _LOGGER.info(
            "Total Training time taken = %s, for metric: %s %s",
            str(datetime.now() - start_time),
            metric_to_predict.metric_name,
            metric_to_predict.label_config,
        )

    if data_queue is not None:
        data_queue.put(PREDICTOR_MODEL_LIST)
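
# The full PREDICTOR_MODEL_LIST is queued after every training pass; the web
# server process swaps it in on the next request via MainHandler.initialize().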


if __name__ == "__main__":
    # Queue to share data between the tornado server and the model training
    predicted_model_queue = Queue()

    # Initial run to generate metrics, before they are exposed
    train_model(initial_run=True, data_queue=predicted_model_queue)

    # Set up the tornado web app
    app = make_app(predicted_model_queue)
    app.listen(8080)
    server_process = Process(target=tornado.ioloop.IOLoop.instance().start)
    # Start up the server to expose the metrics.
    server_process.start()

    # Schedule the model training
    schedule.every(Configuration.retraining_interval_minutes).minutes.do(
        train_model, initial_run=False, data_queue=predicted_model_queue
    )
    _LOGGER.info(
        "Will retrain model every %s minutes", Configuration.retraining_interval_minutes
    )

    try:
        while True:
            schedule.run_pending()
            time.sleep(1)
    finally:
        # join the server process in case the main process ends
        server_process.join()
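
# Typical local run, assuming the settings consumed by the Configuration class
# are provided: `python app.py`, then point a Prometheus scrape job at
# http://localhost:8080/metrics.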