
Commit 18b13f0

Make the Prometheus query interval (in minutes) configurable
We have some clusters where we would like to change the frequency at which we collect metrics and generate reports. To accomplish this, we make two changes: we set an environment variable that is passed to the metrics collector scripts, and we insert the collection interval into the JSON metrics file. The merge scripts read this interval when loading the files. For backwards compatibility with our old metrics files, we default to 15 minutes when processing files that lack the interval.
1 parent ffe21b7 commit 18b13f0
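
A minimal end-to-end sketch of the flow described above, using only the environment variable and the "interval_minutes" JSON key introduced in the diffs below; the file name and values here are illustrative, not taken from the repository:

import json
import os

# Collector side: read the interval from the environment, defaulting to 15 minutes.
interval_minutes = int(os.getenv("PROM_QUERY_INTERVAL_MINUTES", 15))

# Record the interval alongside the collected metrics so the merge step can recover it.
with open("metrics-example.json", "w") as jsonfile:  # hypothetical output file
    json.dump({"interval_minutes": interval_minutes}, jsonfile)

# Merge side: old-format files without the key fall back to the 15-minute default.
with open("metrics-example.json") as jsonfile:
    interval_minutes = json.load(jsonfile).get("interval_minutes", 15)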

File tree

3 files changed: +28 -4 lines changed

  openshift_metrics/config.py
  openshift_metrics/merge.py
  openshift_metrics/openshift_prometheus_metrics.py

openshift_metrics/config.py

Lines changed: 2 additions & 0 deletions
@@ -14,3 +14,5 @@
 S3_SECRET_ACCESS_KEY = os.getenv("S3_OUTPUT_SECRET_ACCESS_KEY")
 S3_INVOICE_BUCKET = os.getenv("S3_INVOICE_BUCKET", "nerc-invoicing")
 S3_METRICS_BUCKET = os.getenv("S3_METRICS_BUCKET", "openshift_metrics")
+PROM_QUERY_INTERVAL_MINUTES = int(os.getenv("PROM_QUERY_INTERVAL_MINUTES", 15))
+assert PROM_QUERY_INTERVAL_MINUTES >= 1, "Query interval must be at least 1 minute"

openshift_metrics/merge.py

Lines changed: 22 additions & 2 deletions
@@ -2,6 +2,7 @@
 Merges metrics from files and produces reports by pod and by namespace
 """
 
+import sys
 import logging
 import argparse
 from datetime import datetime, UTC
@@ -12,7 +13,7 @@
 
 from openshift_metrics import utils, invoice
 from openshift_metrics.metrics_processor import MetricsProcessor
-from openshift_metrics.config import S3_INVOICE_BUCKET
+from openshift_metrics.config import S3_INVOICE_BUCKET, PROM_QUERY_INTERVAL_MINUTES
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -106,7 +107,26 @@ def main():
     report_start_date = None
     report_end_date = None
     cluster_name = None
-    processor = MetricsProcessor()
+    interval_minutes = None
+
+    for file in files:
+        with open(file, "r") as jsonfile:
+            metrics_from_file = json.load(jsonfile)
+            if interval_minutes is None:
+                interval_minutes = metrics_from_file.get("interval_minutes")
+            else:
+                interval_minutes_from_file = metrics_from_file["interval_minutes"]
+                if interval_minutes != interval_minutes_from_file:
+                    sys.exit(f"Prometheus interval minutes differ in the given set of files {interval_minutes} != {interval_minutes_from_file}")
+
+    if interval_minutes is None:
+        logger.info(f"No prometheus query interval minutes found in the given set of files. Using the provided interval: {PROM_QUERY_INTERVAL_MINUTES} minute(s)")
+        interval_minutes = PROM_QUERY_INTERVAL_MINUTES
+    else:
+        logger.info(f"Prometheus Query interval set to {interval_minutes} minute(s) from file")
+
+
+    processor = MetricsProcessor(interval_minutes)
 
     for file in files:
         with open(file, "r") as jsonfile:
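
The loop added above also refuses to merge files that disagree on the interval. A rough standalone illustration of that guard, with made-up interval values standing in for real metrics files:

import sys

intervals_seen = [15, 15, 5]  # intervals read from three hypothetical metrics files

interval_minutes = None
for interval_minutes_from_file in intervals_seen:
    if interval_minutes is None:
        interval_minutes = interval_minutes_from_file
    elif interval_minutes != interval_minutes_from_file:
        # Mirrors the check in merge.py: mixed intervals abort the merge.
        sys.exit(f"Prometheus interval minutes differ in the given set of files {interval_minutes} != {interval_minutes_from_file}")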

openshift_metrics/openshift_prometheus_metrics.py

Lines changed: 4 additions & 2 deletions
@@ -26,6 +26,7 @@
     OPENSHIFT_PROMETHEUS_URL,
     OPENSHIFT_TOKEN,
     S3_METRICS_BUCKET,
+    PROM_QUERY_INTERVAL_MINUTES,
 )
 
 logging.basicConfig(level=logging.INFO)
@@ -89,14 +90,15 @@ def main():
     output_file = f"metrics-{report_start_date}-to-{report_end_date}.json"
 
     logger.info(
-        f"Generating report starting {report_start_date} and ending {report_end_date} in {output_file}"
+        f"Generating report starting {report_start_date} and ending {report_end_date} in {output_file} with interval {PROM_QUERY_INTERVAL_MINUTES} minute"
     )
 
-    prom_client = PrometheusClient(openshift_url, OPENSHIFT_TOKEN)
+    prom_client = PrometheusClient(openshift_url, OPENSHIFT_TOKEN, PROM_QUERY_INTERVAL_MINUTES)
 
     metrics_dict = {}
     metrics_dict["start_date"] = report_start_date
     metrics_dict["end_date"] = report_end_date
+    metrics_dict["interval_minutes"] = PROM_QUERY_INTERVAL_MINUTES
     metrics_dict["cluster_name"] = URL_CLUSTER_NAME_MAPPING.get(
         args.openshift_url, args.openshift_url
     )
