diff --git a/02-path-working-with-clusters/201-cluster-monitoring/readme.adoc b/02-path-working-with-clusters/201-cluster-monitoring/readme.adoc
index 16a8d22f..8bec3093 100644
--- a/02-path-working-with-clusters/201-cluster-monitoring/readme.adoc
+++ b/02-path-working-with-clusters/201-cluster-monitoring/readme.adoc
@@ -258,7 +258,17 @@ Prometheus is now scraping metrics from the different scraping targets and we fo
     $ kubectl port-forward $(kubectl get po -l prometheus=prometheus -n monitoring -o jsonpath={.items[0].metadata.name}) 9090 -n monitoring
     Forwarding from 127.0.0.1:9090 -> 9090
 
-Now open the browser at http://localhost:9090/targets and all targets should be shown as `UP` (it might take a couple of minutes until data collectors are up and running for the first time). The browser displays the output as shown:
+Now open the browser at http://localhost:9090/targets.
+
+If you are running this in the Cloud9 IDE, you will need to run the following command instead to be able to view the dashboard:
+
+    $ kubectl port-forward $(kubectl get po -l prometheus=prometheus -n monitoring -o jsonpath={.items[0].metadata.name}) 8080:9090 -n monitoring
+    Forwarding from 127.0.0.1:8080 -> 9090
+    Forwarding from [::1]:8080 -> 9090
+
+The dashboard will then be available at https://<environment-id>.vfs.cloud9.<region>.amazonaws.com/targets.
+
+All targets should be shown as `UP` (it might take a couple of minutes until data collectors are up and running for the first time). The browser displays the output as shown:
 
 image::monitoring-grafana-prometheus-dashboard-1.png[]
 image::monitoring-grafana-prometheus-dashboard-2.png[]
@@ -287,7 +297,17 @@ Lets forward the grafana dashboard to a local port:
     $ kubectl port-forward $(kubectl get pod -l app=grafana -o jsonpath={.items[0].metadata.name} -n monitoring) 3000 -n monitoring
     Forwarding from 127.0.0.1:3000 -> 3000
 
-Grafana dashboard is now accessible at http://localhost:3000/. The complete list of dashboards is available using the search button at the top:
+Grafana dashboard is now accessible at http://localhost:3000/.
+
+If you are running this in the Cloud9 IDE, you will need to run the following command instead to be able to view the dashboard:
+
+    $ kubectl port-forward $(kubectl get pod -l app=grafana -o jsonpath={.items[0].metadata.name} -n monitoring) 8080:3000 -n monitoring
+    Forwarding from 127.0.0.1:8080 -> 3000
+    Forwarding from [::1]:8080 -> 3000
+
+The dashboard will then be available at https://<environment-id>.vfs.cloud9.<region>.amazonaws.com/.
+
+The complete list of dashboards is available using the search button at the top:
 
 image::monitoring-grafana-prometheus-dashboard-dashboard-home.png[]
 
@@ -316,6 +336,8 @@ Convenient link for other dashboards are listed below:
 * http://localhost:3000/dashboard/db/kubernetes-resource-requests?orgId=1
 * http://localhost:3000/dashboard/db/pods?orgId=1
 
+(For Cloud9 users, replace `http://localhost:3000/` with `https://<environment-id>.vfs.cloud9.<region>.amazonaws.com/` in the links above.)
+
 === Cleanup
 
 Remove all the installed components:
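Before hunting for the Cloud9 preview URL, a quick way to confirm that either forward in the readme hunks above is actually serving traffic is to probe the forwarded port from the same terminal. This is a sketch, not part of the patch; it assumes `curl` and `jq` are available in the Cloud9 environment, and it uses Prometheus' `/api/v1/targets` HTTP API and Grafana's `/api/health` endpoint:

    # While `kubectl port-forward ... 8080:9090` is running: print the health of
    # every active scrape target ("up", "down", or "unknown").
    $ curl -s http://localhost:8080/api/v1/targets | jq -r '.data.activeTargets[].health'

    # While `kubectl port-forward ... 8080:3000` is running: Grafana's
    # unauthenticated health check; a JSON response means the forward works.
    $ curl -s http://localhost:8080/api/health

If both respond, any remaining blank page is a Cloud9 preview problem rather than a port-forward problem.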
diff --git a/02-path-working-with-clusters/201-cluster-monitoring/templates/prometheus/prometheus-bundle.yaml b/02-path-working-with-clusters/201-cluster-monitoring/templates/prometheus/prometheus-bundle.yaml
index 3f435f47..d59e3144 100644
--- a/02-path-working-with-clusters/201-cluster-monitoring/templates/prometheus/prometheus-bundle.yaml
+++ b/02-path-working-with-clusters/201-cluster-monitoring/templates/prometheus/prometheus-bundle.yaml
@@ -97,7 +97,7 @@ spec:
       - args:
         - --kubelet-service=kube-system/kubelet
         - --config-reloader-image=quay.io/coreos/configmap-reload:v0.0.1
-        image: quay.io/coreos/prometheus-operator:v0.14.1
+        image: quay.io/coreos/prometheus-operator:v0.21.0
         name: prometheus-operator
         ports:
         - containerPort: 8080
diff --git a/02-path-working-with-clusters/201-cluster-monitoring/templates/prometheus/prometheus.yaml b/02-path-working-with-clusters/201-cluster-monitoring/templates/prometheus/prometheus.yaml
index 6d6c0255..589e23d3 100644
--- a/02-path-working-with-clusters/201-cluster-monitoring/templates/prometheus/prometheus.yaml
+++ b/02-path-working-with-clusters/201-cluster-monitoring/templates/prometheus/prometheus.yaml
@@ -160,7 +160,7 @@ spec:
       serviceAccountName: kube-state-metrics
       containers:
       - name: kube-state-metrics
-        image: quay.io/coreos/kube-state-metrics:v1.0.1
+        image: quay.io/coreos/kube-state-metrics:v1.3.1
         ports:
         - name: metrics
           containerPort: 8080
@@ -171,7 +171,7 @@ spec:
           initialDelaySeconds: 5
           timeoutSeconds: 5
       - name: addon-resizer
-        image: k8s.gcr.io/addon-resizer:1.0
+        image: k8s.gcr.io/addon-resizer:1.7
         resources:
           limits:
             cpu: 100m
@@ -225,7 +225,7 @@ metadata:
 spec:
   replicas: 2
   version: v2.0.0-rc.1
-  serviceAccountName: prometheus-operator
+  serviceAccountName: prometheus
   serviceMonitorSelector:
     matchExpressions:
     - {key: k8s-app, operator: Exists}
@@ -246,6 +246,45 @@ spec:
       name: alertmanager-main
       port: web
 ---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: prometheus
+  namespace: monitoring
+rules:
+- apiGroups: [""]
+  resources:
+  - nodes
+  - services
+  - endpoints
+  - pods
+  verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+  resources:
+  - configmaps
+  verbs: ["get"]
+- nonResourceURLs: ["/metrics"]
+  verbs: ["get"]
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: prometheus
+  namespace: monitoring
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: prometheus
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: prometheus
+subjects:
+- kind: ServiceAccount
+  name: prometheus
+  namespace: monitoring
+---
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
 metadata:
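On the RBAC hunk above: the new `prometheus` ServiceAccount, ClusterRole, and ClusterRoleBinding exist because the `Prometheus` resource now runs under `serviceAccountName: prometheus`, and Prometheus needs `get`/`list`/`watch` on nodes, services, endpoints, and pods for Kubernetes service discovery. (The `namespace` field on the ClusterRole is ignored by the API server, since ClusterRoles are cluster-scoped.) One quick check after applying the updated `prometheus.yaml` is kubectl's built-in authorization query, which can impersonate the service account; a sketch, not part of the patch:

    # Each of these should print "yes" once the ClusterRoleBinding is in place.
    $ kubectl auth can-i list pods --as=system:serviceaccount:monitoring:prometheus
    yes
    $ kubectl auth can-i watch endpoints --as=system:serviceaccount:monitoring:prometheus
    yes
    $ kubectl auth can-i get configmaps --as=system:serviceaccount:monitoring:prometheus
    yes

Note that impersonation itself requires elevated rights (typically cluster-admin) for whoever runs the check.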