Expose a specific metrics location for my custom metrics collector
Question:
I’m writing a custom k8s metrics collector to monitor application versions across two different clusters. How do I expose a specific location for metrics, like "/metrics", and collect metrics in an infinite loop?
Here is my custom metrics collector:
import time

from prometheus_client import start_http_server
from prometheus_client.core import REGISTRY, CounterMetricFamily
from kubernetes import client, config, watch


class CustomCollector(object):
    def __init__(self):
        pass

    def collect(self):
        g = CounterMetricFamily("retail_pods_info", 'info about pods',
                                labels=['secret', 'namespace', 'deployment_name', 'image', 'helm'])
        config.load_kube_config('config')
        v1 = client.CoreV1Api()
        group = "argoproj.io"
        version = "v1alpha1"
        plural = "applications"
        # kind = "Application"
        namespace = "argo-cd"

        # Gather (secret, namespace, deployment, image) for every pod
        # that mounts a projected secret volume.
        pod_list: client.V1PodList = v1.list_pod_for_all_namespaces(watch=False)
        pods: list[client.V1Pod] = pod_list.items
        metrics_list = []
        for pod in pods:
            metadata: client.V1ObjectMeta = pod.metadata
            spec: client.V1PodSpec = pod.spec
            volumes: list[client.V1Volume] = spec.volumes
            if volumes is not None:
                for volume in volumes:
                    if volume.projected:
                        projected: client.V1ProjectedVolumeSource = volume.projected
                        sources: list[client.V1VolumeProjection] = projected.sources
                        for source in sources:
                            if source.secret:
                                secret: client.V1SecretProjection = source.secret
                                s = (secret.name + " " + metadata.namespace.lower() + " "
                                     + metadata.name.lower().rsplit('-', 2)[0] + " "
                                     + pod.spec.containers[0].image)
                                metrics_list.append(s.split())

        # Match each pod entry against its Argo CD Application to add the Helm revision.
        api_client = client.ApiClient()
        argocd_api = client.CustomObjectsApi(api_client)
        argocd_apps = argocd_api.list_namespaced_custom_object(group, version, namespace, plural, watch=False)
        for metric in metrics_list:
            for app in argocd_apps["items"]:
                if metric[2] == app["metadata"]["name"]:
                    helm_version = app["spec"]["source"]["repoURL"] + "-" + app["spec"]["source"]["targetRevision"]
                    metric.append(helm_version)
                    g.add_metric([metric[0], metric[1], metric[2], metric[3], metric[4]], 1)
        yield g

        # for k in metrics_list:
        #     g.add_metric([k[0], k[1], k[2], k[3]], 1)
        # yield g


if __name__ == '__main__':
    start_http_server(8000)
    REGISTRY.register(CustomCollector())
    while True:
        time.sleep(60)
Answers:
Regarding the /metrics endpoint: start_http_server starts a server on the specified port that responds to any request path with the metrics output. So a request to /metrics will be answered with your generated metrics by default.
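If you want the server to answer only on /metrics and return 404 for anything else, one option is to mount the client library's WSGI app behind a small dispatcher instead of calling start_http_server. A minimal sketch, assuming your CustomCollector from above is in scope and the stdlib wsgiref server is acceptable for your setup:

from wsgiref.simple_server import make_server

from prometheus_client import make_wsgi_app
from prometheus_client.core import REGISTRY

# WSGI app that renders the registry's metrics on each request.
metrics_app = make_wsgi_app(REGISTRY)

def dispatcher(environ, start_response):
    # Serve metrics only under /metrics; everything else gets a 404.
    if environ.get('PATH_INFO') == '/metrics':
        return metrics_app(environ, start_response)
    start_response('404 Not Found', [('Content-Type', 'text/plain')])
    return [b'Not Found']

if __name__ == '__main__':
    REGISTRY.register(CustomCollector())
    make_server('', 8000, dispatcher).serve_forever()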
Regarding "collect metrics in infinity cycle":
Your app is already doing it (kinda). Since you registered your custom collector every request will invoke the collect
method. And since Prometheus while gathering metrics basically does it in an infinite cycle – your collector does it too.
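To convince yourself that collect runs once per scrape rather than on a timer, you can render a registry by hand with generate_latest; each call performs one scrape and therefore one pass through collect. A quick check, again assuming CustomCollector from the question is importable:

from prometheus_client import generate_latest
from prometheus_client.core import CollectorRegistry

registry = CollectorRegistry()
registry.register(CustomCollector())

# Every call scrapes the registry once, invoking CustomCollector.collect().
print(generate_latest(registry).decode())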