Targets


monitoring/alertmanager/0 (1/1 up)

http://10.42.1.164:9093/metrics  [up]
  labels:      endpoint="web" instance="10.42.1.164:9093" job="alertmanager-main" namespace="monitoring" pod="alertmanager-main-0" service="alertmanager-main"
  last scrape: 4.365s ago (took 19.34ms)

monitoring/coredns/0 (2/2 up)

http://10.42.0.190:9153/metrics  [up]
  labels:      endpoint="metrics" instance="10.42.0.190:9153" job="kube-dns" namespace="kube-system" pod="coredns-6d668d687-m7w7m" service="kube-dns"
  last scrape: 369ms ago (took 16.05ms)

http://10.42.0.190:9153/metrics  [up]
  labels:      endpoint="metrics" instance="10.42.0.190:9153" job="kube-dns" namespace="kube-system" pod="coredns-6d668d687-m7w7m" service="kube-dns-prometheus-discovery"
  last scrape: 8.217s ago (took 23.57ms)

monitoring/grafana/0 (1/1 up)

http://10.42.4.168:3000/metrics  [up]
  labels:      endpoint="http" instance="10.42.4.168:3000" job="grafana" namespace="monitoring" pod="grafana-f4cbc975f-s6n6h" service="grafana"
  last scrape: 2.129s ago (took 3.762ms)

monitoring/kube-apiserver/0 (1/1 up)

https://192.168.0.41:6443/metrics  [up]
  labels:      endpoint="https" instance="192.168.0.41:6443" job="apiserver" namespace="default" service="kubernetes"
  last scrape: 17.575s ago (took 595.9ms)

monitoring/kube-controller-manager/0 (0/1 up)

http://192.168.0.41:10252/metrics  [down]
  labels:      endpoint="http-metrics" instance="192.168.0.41:10252" job="kube-controller-manager" namespace="kube-system" service="kube-controller-manager-prometheus-discovery"
  last scrape: 16.771s ago (took 4.338ms)
  error:       Get "http://192.168.0.41:10252/metrics": dial tcp 192.168.0.41:10252: connect: connection refused

monitoring/kube-scheduler/0 (0/1 up)

http://192.168.0.41:10251/metrics  [down]
  labels:      endpoint="http-metrics" instance="192.168.0.41:10251" job="kube-scheduler" namespace="kube-system" service="kube-scheduler-prometheus-discovery"
  last scrape: 28.115s ago (took 6.79ms)
  error:       Get "http://192.168.0.41:10251/metrics": dial tcp 192.168.0.41:10251: connect: connection refused

monitoring/kube-state-metrics/0 (1/1 up)

https://10.42.0.199:8443/metrics  [up]
  labels:      instance="10.42.0.199:8443" job="kube-state-metrics"
  last scrape: 13.351s ago (took 150.8ms)

monitoring/kube-state-metrics/1 (1/1 up)

https://10.42.0.199:9443/metrics  [up]
  labels:      endpoint="https-self" instance="10.42.0.199:9443" job="kube-state-metrics" namespace="monitoring" pod="kube-state-metrics-6895565bd-cbrst" service="kube-state-metrics"
  last scrape: 1.49s ago (took 14.42ms)

monitoring/kubelet/0 (6/6 up)

https://192.168.0.41:10250/metrics  [up]
  labels:      endpoint="https-metrics" instance="192.168.0.41:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="remypi" service="kubelet"
  last scrape: 12.748s ago (took 544.4ms)

https://192.168.0.43:10250/metrics  [up]
  labels:      endpoint="https-metrics" instance="192.168.0.43:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="remypi2" service="kubelet"
  last scrape: 13.343s ago (took 153.7ms)

https://192.168.0.44:10250/metrics  [up]
  labels:      endpoint="https-metrics" instance="192.168.0.44:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="remyjetson" service="kubelet"
  last scrape: 13.152s ago (took 1.164s)

https://192.168.0.49:10250/metrics  [up]
  labels:      endpoint="https-metrics" instance="192.168.0.49:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="remypi3" service="kubelet"
  last scrape: 4.835s ago (took 71.84ms)

https://192.168.0.53:10250/metrics  [up]
  labels:      endpoint="https-metrics" instance="192.168.0.53:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="remypi1" service="kubelet"
  last scrape: 24.545s ago (took 64.54ms)

https://192.168.0.60:10250/metrics  [up]
  labels:      endpoint="https-metrics" instance="192.168.0.60:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="remypi4" service="kubelet"
  last scrape: 22.499s ago (took 45.71ms)

monitoring/kubelet/1 (6/6 up)

https://192.168.0.41:10250/metrics/cadvisor  [up]
  labels:      endpoint="https-metrics" instance="192.168.0.41:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="remypi" service="kubelet"
  last scrape: 23.146s ago (took 169.4ms)

https://192.168.0.43:10250/metrics/cadvisor  [up]
  labels:      endpoint="https-metrics" instance="192.168.0.43:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="remypi2" service="kubelet"
  last scrape: 16s ago (took 156ms)

https://192.168.0.44:10250/metrics/cadvisor  [up]
  labels:      endpoint="https-metrics" instance="192.168.0.44:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="remyjetson" service="kubelet"
  last scrape: 6.728s ago (took 78.58ms)

https://192.168.0.49:10250/metrics/cadvisor  [up]
  labels:      endpoint="https-metrics" instance="192.168.0.49:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="remypi3" service="kubelet"
  last scrape: 25.116s ago (took 114.7ms)

https://192.168.0.53:10250/metrics/cadvisor  [up]
  labels:      endpoint="https-metrics" instance="192.168.0.53:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="remypi1" service="kubelet"
  last scrape: 5.785s ago (took 152ms)

https://192.168.0.60:10250/metrics/cadvisor  [up]
  labels:      endpoint="https-metrics" instance="192.168.0.60:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="remypi4" service="kubelet"
  last scrape: 4.287s ago (took 65.13ms)

monitoring/node-exporter/0 (5/6 up)

https://192.168.0.44:9100/metrics  [down]
  labels:      endpoint="https" instance="remyjetson" job="node-exporter" namespace="monitoring" pod="node-exporter-xmx7q" service="node-exporter"
  last scrape: 18.929s ago (took 15s)
  error:       Get "https://192.168.0.44:9100/metrics": context deadline exceeded

https://192.168.0.41:9100/metrics  [up]
  labels:      endpoint="https" instance="remypi" job="node-exporter" namespace="monitoring" pod="node-exporter-7nqg4" service="node-exporter"
  last scrape: 5.843s ago (took 331.6ms)

https://192.168.0.53:9100/metrics  [up]
  labels:      endpoint="https" instance="remypi1" job="node-exporter" namespace="monitoring" pod="node-exporter-9gw8k" service="node-exporter"
  last scrape: 7.551s ago (took 73.07ms)

https://192.168.0.43:9100/metrics  [up]
  labels:      endpoint="https" instance="remypi2" job="node-exporter" namespace="monitoring" pod="node-exporter-pvzjv" service="node-exporter"
  last scrape: 1.777s ago (took 1.651s)

https://192.168.0.49:9100/metrics  [up]
  labels:      endpoint="https" instance="remypi3" job="node-exporter" namespace="monitoring" pod="node-exporter-vv6j2" service="node-exporter"
  last scrape: 15.2s ago (took 90.5ms)

https://192.168.0.60:9100/metrics  [up]
  labels:      endpoint="https" instance="remypi4" job="node-exporter" namespace="monitoring" pod="node-exporter-mjzqz" service="node-exporter"
  last scrape: 13.984s ago (took 117.9ms)

monitoring/prometheus-operator/0 (1/1 up)

https://10.42.0.203:8443/metrics  [up]
  labels:      endpoint="https" instance="10.42.0.203:8443" job="prometheus-operator" namespace="monitoring" pod="prometheus-operator-fcc86bfbb-h595h" service="prometheus-operator"
  last scrape: 2.584s ago (took 17.14ms)

monitoring/prometheus/0 (1/1 up)

http://10.42.4.169:9090/metrics  [up]
  labels:      endpoint="web" instance="10.42.4.169:9090" job="prometheus-k8s" namespace="monitoring" pod="prometheus-k8s-0" service="prometheus-k8s"
  last scrape: 24.907s ago (took 10.09ms)

monitoring/traefik/0 (0/0 up)

(no targets discovered: 0/0 up usually means the ServiceMonitor's selector matched no Service endpoints for this pool)
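Everything this page shows is also available programmatically: Prometheus's HTTP API exposes GET /api/v1/targets, which returns every active target with its scrape pool, health, and last error. Below is a small sketch that prints the down targets and a per-pool summary; PROM_URL is an assumption, point it at wherever prometheus-k8s is reachable (e.g. kubectl port-forward svc/prometheus-k8s 9090 -n monitoring). Note that a pool with zero discovered targets, like monitoring/traefik/0 above, simply won't appear among the active targets.

    import json
    import urllib.request
    from collections import Counter

    PROM_URL = "http://localhost:9090"  # assumed; adjust to your setup

    # Fetch the same data the Targets page renders.
    with urllib.request.urlopen(f"{PROM_URL}/api/v1/targets") as resp:
        targets = json.load(resp)["data"]["activeTargets"]

    summary = Counter()
    for t in targets:
        summary[(t["scrapePool"], t["health"])] += 1
        if t["health"] != "up":
            print(f"DOWN  {t['scrapeUrl']}  {t['lastError']}")

    # Per-pool counts, mirroring the "(N/M up)" headers above.
    for (pool, health), n in sorted(summary.items()):
        print(f"{pool}: {n} {health}")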