
Commit 5d6220c

updated ingress dashboard changes
1 parent 15b9a95 commit 5d6220c

File tree

4 files changed (+95 −18 lines)


CHANGELOG.md

Lines changed: 8 additions & 0 deletions
@@ -18,3 +18,11 @@ All notable changes to this project will be documented in this file.
 ### BugFixes:
 - Connection retries for ADC only on new Prometheus Requests.
 - Single Login Session.
+
+## [1.4.6] - 2020-08-28
+### Added
+- Ingress dashboard as per modifications in naming in CIC
+- Logging changes
+
+### BugFixes:
+- Error handling modification

README.md

Lines changed: 7 additions & 7 deletions
@@ -98,15 +98,15 @@ For this:
 <summary>Usage as a Container</summary>
 <br>
 
-In order to use the exporter as a container, the image ```quay.io/citrix/citrix-adc-metrics-exporter:1.4.5``` will need to be pulled using;
+In order to use the exporter as a container, the image ```quay.io/citrix/citrix-adc-metrics-exporter:1.4.6``` will need to be pulled using;
 ```
-docker pull quay.io/citrix/citrix-adc-metrics-exporter:1.4.5
+docker pull quay.io/citrix/citrix-adc-metrics-exporter:1.4.6
 ```
 **NOTE:** It can also be built locally using ```docker build -f Dockerfile -t <image_name>:<tag> ./```
 
 Now, the exporter can be run using:
 ```
-docker run -dt -p <host_port>:<container_port> --mount type=bind,source=<host-path-for-config-file>,target=/exporter/config.yaml quay.io/citrix/citrix-adc-metrics-exporter:1.4.5 [flags] --config-file=/exporter/config.yaml
+docker run -dt -p <host_port>:<container_port> --mount type=bind,source=<host-path-for-config-file>,target=/exporter/config.yaml quay.io/citrix/citrix-adc-metrics-exporter:1.4.6 [flags] --config-file=/exporter/config.yaml
 ```
 where the flags are:
 
@@ -129,7 +129,7 @@
 
 To set up the exporter as given in the diagram, the following command can be used:
 ```
-docker run -dt -p 8888:8888 --mount type=bind,source=/path/to/config.yaml,target=/exporter/config.yaml --name citrix-adc-exporter quay.io/citrix/citrix-adc-metrics-exporter:1.4.5 --target-nsip=10.0.0.1 --port=8888 --config-file=/exporter/config.yaml
+docker run -dt -p 8888:8888 --mount type=bind,source=/path/to/config.yaml,target=/exporter/config.yaml --name citrix-adc-exporter quay.io/citrix/citrix-adc-metrics-exporter:1.4.6 --target-nsip=10.0.0.1 --port=8888 --config-file=/exporter/config.yaml
 ```
 This directs the exporter container to scrape the 10.0.0.1 IP, and then expose the stats it collects on port 8888.
 
@@ -156,7 +156,7 @@ For this:
 Certificate should then be mounted at the '--cacert-path' provided. For instance, if cert is 'cacert.pem' and '--cacert-path' provided in 'config.yaml' is '/exporter/cacert.pem'
 
 ```
-docker run -dt -p 8888:8888 --mount type=bind,source=/path/to/config.yaml,target=/exporter/config.yaml --mount type=bind,source=/path/to/cacert.pem,target=/exporter/cacert.pem --name citrix-adc-exporter quay.io/citrix/citrix-adc-metrics-exporter:1.4.5 --target-nsip=10.0.0.1 --port=8888 --config-file=/exporter/config.yaml
+docker run -dt -p 8888:8888 --mount type=bind,source=/path/to/config.yaml,target=/exporter/config.yaml --mount type=bind,source=/path/to/cacert.pem,target=/exporter/cacert.pem --name citrix-adc-exporter quay.io/citrix/citrix-adc-metrics-exporter:1.4.6 --target-nsip=10.0.0.1 --port=8888 --config-file=/exporter/config.yaml
 ```
 Cert validation options can also be provided using the environment variables NS_VALIDATE_CERT, NS_CACERT_PATH. Though config file input is the preferred method.
 
@@ -185,7 +185,7 @@ metadata:
 spec:
   containers:
     - name: exporter
-      image: quay.io/citrix/citrix-adc-metrics-exporter:1.4.5
+      image: quay.io/citrix/citrix-adc-metrics-exporter:1.4.6
       args:
         - "--target-nsip=10.0.0.1"
         - "--port=8888"
@@ -262,7 +262,7 @@ metadata:
 spec:
   containers:
     - name: exporter
-      image: quay.io/citrix/citrix-adc-metrics-exporter:1.4.5
+      image: quay.io/citrix/citrix-adc-metrics-exporter:1.4.6
       args:
         - "--target-nsip=10.0.0.1"
         - "--port=8888"

exporter.py

Lines changed: 79 additions & 10 deletions
@@ -279,7 +279,7 @@ def collect(self):
             try:
                 status, entity_data = self.collect_data(entity)
             except Exception as e:
-                logger.error('Could not collect metric :{}'.format(entity))
+                logger.error('Could not collect metric :{}'.format(e))
 
             if status == self.FAILURE:
                 self.ns_session_clear()
@@ -288,6 +288,18 @@ def collect(self):
             if entity_data:
                 data[entity] = entity_data
 
+        if 'k8s_ingress_lbvs' in self.metrics and \
+                os.environ.get('KUBERNETES_SERVICE_HOST') is not None:
+            lbvs_dict = None
+            try:
+                status, lbvs_dict = self.collect_lbvs_config()
+            except Exception as e:
+                logger.error('Could not collect config entries for lbvs: {}'.format(e))
+
+            if status == self.FAILURE:
+                self.ns_session_clear()
+                return
+
         # Add labels to metrics and provide to Prometheus
         log_prefix_match = True
         for entity_name, entity in self.metrics.items():
@@ -322,7 +334,7 @@ def collect(self):
                     if entity_name == "k8s_ingress_lbvs":
                         if os.environ.get('KUBERNETES_SERVICE_HOST') is not None:
                             prefix_match = self.update_lbvs_label(
-                                label_values, ns_metric_name, log_prefix_match)
+                                label_values, lbvs_dict, log_prefix_match)
                             if not prefix_match:
                                 log_prefix_match = False
                                 continue
@@ -335,7 +347,7 @@ def collect(self):
                         c.add_metric(label_values, float(
                             data_item[ns_metric_name]))
                     except Exception as e:
-                        logger.error('Caught exception while adding counter %s to %s: %s' % (ns_metric_name, entity_name, str(e)))
+                        logger.error('Caught exception while adding counter {} to {}: {}'.format(ns_metric_name, entity_name, str(e)))
 
             yield c
 
@@ -360,7 +372,7 @@ def collect(self):
                     if entity_name == "k8s_ingress_lbvs":
                         if os.environ.get('KUBERNETES_SERVICE_HOST') is not None:
                             prefix_match = self.update_lbvs_label(
-                                label_values, ns_metric_name, log_prefix_match)
+                                label_values, lbvs_dict, log_prefix_match)
                             if not prefix_match:
                                 log_prefix_match = False
                                 continue
@@ -388,8 +400,11 @@ def collect_data(self, entity):
             return self.get_lbvs_bindings_status()
 
         # this is to fetch lb status for ingress/services in k8s enviroment
-        if(entity == 'k8s_ingress_lbvs'):
-            entity = 'lbvserver'
+        if (entity == 'k8s_ingress_lbvs'):
+            if os.environ.get('KUBERNETES_SERVICE_HOST') is not None:
+                entity = 'lbvserver'
+            else:
+                return self.SUCCESS, None
 
         # nitro call for all entities except 'services' (ie. servicegroups)
         if (entity == 'services'):
@@ -429,6 +444,9 @@ def get_svc_grp_services_stats(self):
         if servicegroup_list_ds:
             if 'servicegroup' not in servicegroup_list_ds:
                 logger.info('No metric data available for servicegroup')
+                if status == self.INVALID:
+                    logger.debug('Invalid metric fetch for servicegroup' \
+                        'with errorcode:{} '.format(servicegroup_list_ds['errorcode']))
                 return status, None
         else:
             logger.warning('Unable to fetch data for servicegroup')
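
For orientation (not part of the commit): when the servicegroup listing comes back without a 'servicegroup' key, the response is typically NITRO's error envelope, whose errorcode field is what the new debug line prints. A tiny illustration with made-up values:

```
# Hypothetical NITRO error envelope; only 'errorcode' is read by the new debug log.
servicegroup_list_ds = {'errorcode': 258, 'message': 'No such resource', 'severity': 'ERROR'}
if 'servicegroup' not in servicegroup_list_ds:
    print('Invalid metric fetch for servicegroup with errorcode:{}'.format(servicegroup_list_ds['errorcode']))
```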
@@ -524,7 +542,7 @@ def get_entity_stat(self, url):
             logger.error('Stat Access Failed {}'.format(e))
             return self.FAILURE, None
 
-    def update_lbvs_label(self, label_values, ns_metric_name, log_prefix_match):
+    def update_lbvs_label(self, label_values, lbvs_dict, log_prefix_match):
         '''Updates lbvserver lables for ingress and services for k8s_cic_ingress_service_stat dashboard.'''
         try:
             # If lbvs name ends with expected _svc, then label values are updated with ingress/service info.
@@ -536,8 +554,8 @@ def update_lbvs_label(self, label_values, ns_metric_name, log_prefix_match):
                     # return if ingress name as a service
                     if label_values[0].split("_")[3] == 'svc':
                         if log_prefix_match:
-                            logger.debug(
-                                'k8s_ingress_service_stat Ingress dashboard cannot be used without ingress with CIC')
+                            logger.debug('k8s_ingress_service_stat dashboard' \
+                                ' cannot be used without ingress with CIC')
                         return False
                     # update label "citrixadc_k8s_ing_lb_ingress_name" with ingress name
                     label_values[0] = label_values[0].split(
@@ -552,7 +570,32 @@ def update_lbvs_label(self, label_values, ns_metric_name, log_prefix_match):
                     return True
                 else:
                     if log_prefix_match:
-                        logger.debug('k8s_cic_ingress_service_stat Ingress dashboard cannot be used for CIC prefix {}'.format(cur_prefix))
+                        logger.debug('k8s_cic_ingress_service_stat dashboard' \
+                            ' cannot be used for CIC prefix {}'.format(cur_prefix))
+                    return False
+            elif lbvs_dict:
+                cur_prefix = str(label_values[0].split("_")[
+                    0].split("-", 1)[0])
+                # update lables only if prefix provided is same as CIC prefix used
+                if cur_prefix == self.k8s_cic_prefix:
+                    comments = lbvs_dict[label_values[0]]
+                    comments = comments.split(',')
+                    if comments[0].split(':')[0] == 'lbsvc':
+                        if log_prefix_match:
+                            logger.debug('k8s_ingress_service_stat dashboard' \
+                                ' cannot be used without ingress with CIC')
+                        return False
+
+                    if comments[0].split(':')[0] == 'ing':
+                        label_values[0] = comments[0].split(':')[1]
+                        label_values[1] = comments[1].split(':')[1]
+                        label_values[2] = comments[3].split(':')[1]
+                        label_values[3] = comments[4].split(':')[1]
+                        return True
+                else:
+                    if log_prefix_match:
+                        logger.debug('k8s_cic_ingress_service_stat dashboard' \
+                            ' cannot be used for CIC prefix {}'.format(cur_prefix))
                     return False
             else:
                 return False
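
As a reading aid (not part of the commit), the new elif lbvs_dict branch boils down to unpacking the comment that CIC writes on each lbvserver into the dashboard's label values. A standalone sketch with the same field indexing; everything beyond the leading 'ing:'/'lbsvc:' key and the positions used above is an assumption:

```
# Hypothetical helper mirroring the comment parsing in update_lbvs_label.
# A CIC comment is assumed to look like 'ing:<ingress>,<k>:<v>,<k>:<v>,<k>:<v>,<k>:<v>'.
def parse_cic_comment(comment):
    fields = comment.split(',')
    kind = fields[0].split(':')[0]
    if kind == 'lbsvc':
        return None  # plain service lbvserver: skipped by the ingress dashboard
    if kind == 'ing':
        return [fields[0].split(':')[1],   # ingress name -> label_values[0]
                fields[1].split(':')[1],   # -> label_values[1]
                fields[3].split(':')[1],   # -> label_values[2]
                fields[4].split(':')[1]]   # -> label_values[3]
    return None
```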
@@ -608,6 +651,32 @@ def ns_session_login(self):
             logger.error('Login Session Failed : {}'.format(e))
             return self.FAILURE
 
+    def collect_lbvs_config(self):
+        ''' This method get lbvs config entries for k8s prefix'''
+        url = '%s://%s/nitro/v1/config/lbvserver?filter=name:%%2f^%s%%2f&attrs=name,comment' % (self.protocol, self.nsip, self.k8s_cic_prefix)
+        try:
+            status, data = self.get_entity_stat(url)
+            if data:
+                if 'lbvserver' in data:
+                    lbvs_dict = {}
+                    lbvs_list = list(data['lbvserver'])
+                    for item in lbvs_list:
+                        if 'comment' in item:
+                            lbvs_dict.update({item['name']:item['comment']})
+                    return status, lbvs_dict
+                else:
+                    logger.debug('No lbvs config for ingress dashboard with k8s prefix "{}"'.format(self.k8s_cic_prefix))
+                    if status == self.INVALID:
+                        logger.debug('Invalid metric fetch for lbvs' \
+                            'with errorcode:{} '.format(data['errorcode']))
+                    return status, None
+            else:
+                logger.warning('Unable to fetch data for entity lbvserver')
+                return status, None
+        except Exception as e:
+            logger.error('Error in fetching lbvs config entries {}'.format(e))
+            return self.FAILURE, None
+
 
 def main():
     parser = argparse.ArgumentParser()
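
For reference, a minimal standalone sketch of the NITRO config call that collect_lbvs_config builds. The exporter itself reuses its NITRO login session via get_entity_stat; the requests call, basic-auth credentials, and verify=False below are illustration-only assumptions:

```
import requests

def fetch_k8s_lbvs_comments(protocol, nsip, k8s_cic_prefix, user, password):
    # Same URL shape as collect_lbvs_config: filter lbvservers by the CIC prefix
    # and ask only for the 'name' and 'comment' attributes.
    url = '%s://%s/nitro/v1/config/lbvserver?filter=name:%%2f^%s%%2f&attrs=name,comment' % (
        protocol, nsip, k8s_cic_prefix)
    resp = requests.get(url, auth=(user, password), verify=False)
    data = resp.json()
    # Map lbvserver name -> comment, as the exporter's lbvs_dict does.
    return {item['name']: item['comment']
            for item in data.get('lbvserver', []) if 'comment' in item}
```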

version/VERSION

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-1.4.5
+1.4.6
