Add prometheus operator
This commit is contained in:
parent
386b972553
commit
585ec5c4f3
23
argocd-apps-operators/prometheus-operator.yaml
Normal file
23
argocd-apps-operators/prometheus-operator.yaml
Normal file
@ -0,0 +1,23 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: prometheus-operator
|
||||
namespace: argocd
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: 'https://github.com/your-org/observability-stack.git'
|
||||
targetRevision: HEAD
|
||||
path: charts/prometheus-operator
|
||||
helm:
|
||||
valueFiles:
|
||||
- ../../manifests/prometheus-operator/values.yaml
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: monitoring
|
||||
syncPolicy:
|
||||
automated:
|
||||
prune: true
|
||||
selfHeal: true
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
3
charts/kube-prometheus-stack/.editorconfig
Normal file
3
charts/kube-prometheus-stack/.editorconfig
Normal file
@ -0,0 +1,3 @@
|
||||
[files/dashboards/*.json]
|
||||
indent_size = 2
|
||||
indent_style = space
|
||||
12
charts/kube-prometheus-stack/.gitignore
vendored
Normal file
12
charts/kube-prometheus-stack/.gitignore
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
# Python development for hack
|
||||
venv
|
||||
pyvenv.cfg
|
||||
!**
|
||||
charts/*
|
||||
!charts/crds/
|
||||
!charts/crds/**
|
||||
Chart.lock
|
||||
hack/*.git
|
||||
hack/tmp/
|
||||
hack/venv/
|
||||
hack/pyvenv.cfg
|
||||
33
charts/kube-prometheus-stack/.helmignore
Normal file
33
charts/kube-prometheus-stack/.helmignore
Normal file
@ -0,0 +1,33 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
# helm/charts
|
||||
OWNERS
|
||||
hack/
|
||||
ci/
|
||||
kube-prometheus-*.tgz
|
||||
|
||||
unittests/
|
||||
files/dashboards/
|
||||
|
||||
UPGRADE.md
|
||||
CONTRIBUTING.md
|
||||
.editorconfig
|
||||
12
charts/kube-prometheus-stack/CONTRIBUTING.md
Normal file
12
charts/kube-prometheus-stack/CONTRIBUTING.md
Normal file
@ -0,0 +1,12 @@
|
||||
# Contributing Guidelines
|
||||
|
||||
## How to contribute to this chart
|
||||
|
||||
1. Fork this repository, develop and test your Chart.
|
||||
1. Bump the chart version for every change.
|
||||
1. Ensure PR title has the prefix `[kube-prometheus-stack]`
|
||||
1. When making changes to rules or dashboards, see the README.md section on how to sync data from upstream repositories
|
||||
1. Check that the `hack/minikube` folder has scripts to set up minikube and components of this chart that will allow all components to be scraped. You can use this configuration when validating your changes.
|
||||
1. Check for changes of RBAC rules.
|
||||
1. Check for changes in CRD specs.
|
||||
1. PR must pass the linter (`helm lint`)
|
||||
72
charts/kube-prometheus-stack/Chart.yaml
Normal file
72
charts/kube-prometheus-stack/Chart.yaml
Normal file
@ -0,0 +1,72 @@
|
||||
apiVersion: v2
|
||||
description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards, and Prometheus rules combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus Operator.
|
||||
icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
|
||||
type: application
|
||||
maintainers:
|
||||
- name: andrewgkew
|
||||
email: andrew@quadcorps.co.uk
|
||||
url: https://github.com/andrewgkew
|
||||
- name: gianrubio
|
||||
email: gianrubio@gmail.com
|
||||
url: https://github.com/gianrubio
|
||||
- name: gkarthiks
|
||||
email: github.gkarthiks@gmail.com
|
||||
url: https://github.com/gkarthiks
|
||||
- name: GMartinez-Sisti
|
||||
email: kube-prometheus-stack@sisti.pt
|
||||
url: https://github.com/GMartinez-Sisti
|
||||
- name: jkroepke
|
||||
email: github@jkroepke.de
|
||||
url: https://github.com/jkroepke
|
||||
- name: scottrigby
|
||||
email: scott@r6by.com
|
||||
url: https://github.com/scottrigby
|
||||
- name: Xtigyro
|
||||
email: miroslav.hadzhiev@gmail.com
|
||||
url: https://github.com/Xtigyro
|
||||
- name: QuentinBisson
|
||||
email: quentin.bisson@gmail.com
|
||||
url: https://github.com/QuentinBisson
|
||||
name: kube-prometheus-stack
|
||||
sources:
|
||||
- https://github.com/prometheus-community/helm-charts
|
||||
- https://github.com/prometheus-operator/kube-prometheus
|
||||
version: 72.6.2
|
||||
# Please do not add a renovate hint here, since appVersion updates involves manual tasks
|
||||
appVersion: v0.82.2
|
||||
kubeVersion: ">=1.19.0-0"
|
||||
home: https://github.com/prometheus-operator/kube-prometheus
|
||||
keywords:
|
||||
- operator
|
||||
- prometheus
|
||||
- kube-prometheus
|
||||
annotations:
|
||||
"artifacthub.io/license": Apache-2.0
|
||||
"artifacthub.io/operator": "true"
|
||||
"artifacthub.io/links": |
|
||||
- name: Chart Source
|
||||
url: https://github.com/prometheus-community/helm-charts
|
||||
- name: Upstream Project
|
||||
url: https://github.com/prometheus-operator/kube-prometheus
|
||||
- name: Upgrade Process
|
||||
url: https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md#upgrading-chart
|
||||
dependencies:
|
||||
- name: crds
|
||||
version: "0.0.0"
|
||||
condition: crds.enabled
|
||||
- name: kube-state-metrics
|
||||
version: "5.33.*"
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
condition: kubeStateMetrics.enabled
|
||||
- name: prometheus-node-exporter
|
||||
version: "4.46.0"
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
condition: nodeExporter.enabled
|
||||
- name: grafana
|
||||
version: "9.0.0"
|
||||
repository: https://grafana.github.io/helm-charts
|
||||
condition: grafana.enabled
|
||||
- name: prometheus-windows-exporter
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
version: "0.10.*"
|
||||
condition: windowsMonitoring.enabled
|
||||
346
charts/kube-prometheus-stack/README.md
Normal file
346
charts/kube-prometheus-stack/README.md
Normal file
@ -0,0 +1,346 @@
|
||||
# kube-prometheus-stack
|
||||
|
||||
Installs core components of the [kube-prometheus stack](https://github.com/prometheus-operator/kube-prometheus), a collection of Kubernetes manifests, [Grafana](http://grafana.com/) dashboards, and [Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with [Prometheus](https://prometheus.io/) using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator).
|
||||
|
||||
See the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) readme for details about components, dashboards, and alerts.
|
||||
|
||||
_Note: This chart was formerly named `prometheus-operator` chart, now renamed to more clearly reflect that it installs the `kube-prometheus` project stack, within which Prometheus Operator is only one component. This chart does not install all components of `kube-prometheus`, notably excluding the Prometheus Adapter and Prometheus black-box exporter._
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Kubernetes 1.19+
|
||||
- Helm 3+
|
||||
|
||||
## Get Helm Repository Info
|
||||
|
||||
```console
|
||||
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
|
||||
helm repo update
|
||||
```
|
||||
|
||||
_See [`helm repo`](https://helm.sh/docs/helm/helm_repo/) for command documentation._
|
||||
|
||||
## Install Helm Chart
|
||||
|
||||
```console
|
||||
helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack
|
||||
```
|
||||
|
||||
_See [configuration](#configuration) below._
|
||||
|
||||
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
|
||||
|
||||
## Dependencies
|
||||
|
||||
By default this chart installs additional, dependent charts:
|
||||
|
||||
- [prometheus-community/kube-state-metrics](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics)
|
||||
- [prometheus-community/prometheus-node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter)
|
||||
- [grafana/grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana)
|
||||
|
||||
To disable dependencies during installation, see [multiple releases](#multiple-releases) below.
|
||||
|
||||
_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._
|
||||
|
||||
## Uninstall Helm Chart
|
||||
|
||||
```console
|
||||
helm uninstall [RELEASE_NAME]
|
||||
```
|
||||
|
||||
This removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
|
||||
|
||||
CRDs created by this chart are not removed by default and should be manually cleaned up:
|
||||
|
||||
```console
|
||||
kubectl delete crd alertmanagerconfigs.monitoring.coreos.com
|
||||
kubectl delete crd alertmanagers.monitoring.coreos.com
|
||||
kubectl delete crd podmonitors.monitoring.coreos.com
|
||||
kubectl delete crd probes.monitoring.coreos.com
|
||||
kubectl delete crd prometheusagents.monitoring.coreos.com
|
||||
kubectl delete crd prometheuses.monitoring.coreos.com
|
||||
kubectl delete crd prometheusrules.monitoring.coreos.com
|
||||
kubectl delete crd scrapeconfigs.monitoring.coreos.com
|
||||
kubectl delete crd servicemonitors.monitoring.coreos.com
|
||||
kubectl delete crd thanosrulers.monitoring.coreos.com
|
||||
```
|
||||
|
||||
## Upgrading Chart
|
||||
|
||||
```console
|
||||
helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack
|
||||
```
|
||||
|
||||
With Helm v3, CRDs created by this chart are not updated by default and should be manually updated.
|
||||
Consult also the [Helm Documentation on CRDs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions).
|
||||
|
||||
CRD updates lead to a major version bump.
|
||||
The Chart's [appVersion](https://github.com/prometheus-community/helm-charts/blob/13ed7098db2f78c2bbcdab6c1c3c7a95b4b94574/charts/kube-prometheus-stack/Chart.yaml#L36) refers to the [`prometheus-operator`](https://github.com/prometheus-operator/prometheus-operator/tree/main)'s version with matching CRDs.
|
||||
|
||||
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
|
||||
|
||||
### Upgrading an existing Release to a new major version
|
||||
|
||||
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
|
||||
|
||||
See [UPGRADE.md](https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/UPGRADE.md)
|
||||
for breaking changes between versions.
|
||||
|
||||
## Configuration
|
||||
|
||||
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments:
|
||||
|
||||
```console
|
||||
helm show values prometheus-community/kube-prometheus-stack
|
||||
```
|
||||
|
||||
You may also `helm show values` on this chart's [dependencies](#dependencies) for additional options.
|
||||
|
||||
### Multiple releases
|
||||
|
||||
The same chart can be used to run multiple Prometheus instances in the same cluster if required. To achieve this, it is necessary to run only one instance of prometheus-operator and a pair of alertmanager pods for an HA configuration, while all other components need to be disabled. To disable a dependency during installation, set `kubeStateMetrics.enabled`, `nodeExporter.enabled` and `grafana.enabled` to `false`.
|
||||
|
||||
## Work-Arounds for Known Issues
|
||||
|
||||
### Running on private GKE clusters
|
||||
|
||||
When Google configure the control plane for private clusters, they automatically configure VPC peering between your Kubernetes cluster’s network and a separate Google managed project. In order to restrict what Google are able to access within your cluster, the firewall rules configured restrict access to your Kubernetes pods. This means that in order to use the webhook component with a GKE private cluster, you must configure an additional firewall rule to allow the GKE control plane access to your webhook pod.
|
||||
|
||||
You can read more information on how to add firewall rules for the GKE control plane nodes in the [GKE docs](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules)
|
||||
|
||||
Alternatively, you can disable the hooks by setting `prometheusOperator.admissionWebhooks.enabled=false`.
|
||||
|
||||
## PrometheusRules Admission Webhooks
|
||||
|
||||
With Prometheus Operator version 0.30+, the core Prometheus Operator pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent malformed rules from being added to the cluster.
|
||||
|
||||
### How the Chart Configures the Hooks
|
||||
|
||||
A validating and mutating webhook configuration requires the endpoint to which the request is sent to use TLS. It is possible to set up custom certificates to do this, but in most cases, a self-signed certificate is enough. The setup of this component requires some more complex orchestration when using helm. The steps are created to be idempotent and to allow turning the feature on and off without running into helm quirks.
|
||||
|
||||
1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end user certificates. If the certificate already exists, the hook exits.
|
||||
2. The prometheus operator pod is configured to use a TLS proxy container, which will load that certificate.
|
||||
3. Validating and Mutating webhook configurations are created in the cluster, with their failure mode set to Ignore. This allows rules to be created by the same chart at the same time, even though the webhook has not yet been fully set up - it does not have the correct CA field set.
|
||||
4. A post-install hook reads the CA from the secret created by step 1 and patches the Validating and Mutating webhook configurations. This process will allow a custom CA provisioned by some other process to also be patched into the webhook configurations. The chosen failure policy is also patched into the webhook configurations
|
||||
|
||||
### Alternatives
|
||||
|
||||
It should be possible to use [jetstack/cert-manager](https://github.com/jetstack/cert-manager) if a more complete solution is required, but it has not been tested.
|
||||
|
||||
You can enable automatic self-signed TLS certificate provisioning via cert-manager by setting the `prometheusOperator.admissionWebhooks.certManager.enabled` value to true.
|
||||
|
||||
### Limitations
|
||||
|
||||
Because the operator can only run as a single pod, there is potential for this component failure to cause rule deployment failure. Because this risk is outweighed by the benefit of having validation, the feature is enabled by default.
|
||||
|
||||
## Developing Prometheus Rules and Grafana Dashboards
|
||||
|
||||
This chart's Grafana Dashboards and Prometheus Rules are just a copy from [prometheus-operator/prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) and other sources, synced (with alterations) by scripts in the [hack](hack) folder. In order to introduce any changes you need to first [add them to the original repository](https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/customizations/developing-prometheus-rules-and-grafana-dashboards.md) and then sync them here via the scripts.
|
||||
|
||||
## Further Information
|
||||
|
||||
For more in-depth documentation of configuration options meanings, please see
|
||||
|
||||
- [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)
|
||||
- [Prometheus](https://prometheus.io/docs/introduction/overview/)
|
||||
- [Grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana#grafana-helm-chart)
|
||||
|
||||
## prometheus.io/scrape
|
||||
|
||||
The prometheus operator does not support annotation-based discovery of services, using the `PodMonitor` or `ServiceMonitor` CRD in its place as they provide far more configuration options.
|
||||
For information on how to use PodMonitors/ServiceMonitors, please see the documentation on the `prometheus-operator/prometheus-operator` documentation here:
|
||||
|
||||
- [ServiceMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/developer/getting-started.md#using-servicemonitors)
|
||||
- [PodMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/developer/getting-started.md#using-podmonitors)
|
||||
- [Running Exporters](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/running-exporters.md)
|
||||
|
||||
By default, Prometheus discovers PodMonitors and ServiceMonitors within its namespace, that are labeled with the same release tag as the prometheus-operator release.
|
||||
Sometimes, you may need to discover custom PodMonitors/ServiceMonitors, for example used to scrape data from third-party applications.
|
||||
An easy way of doing this, without compromising the default PodMonitors/ServiceMonitors discovery, is allowing Prometheus to discover all PodMonitors/ServiceMonitors within its namespace, without applying label filtering.
|
||||
To do so, you can set `prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues` and `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues` to `false`.
|
||||
|
||||
## Migrating from stable/prometheus-operator chart
|
||||
|
||||
## Zero downtime
|
||||
|
||||
Since `kube-prometheus-stack` is fully compatible with the `stable/prometheus-operator` chart, a migration without downtime can be achieved.
|
||||
However, the old name prefix needs to be kept. If you want the new name please follow the step by step guide below (with downtime).
|
||||
|
||||
You can override the name to achieve this:
|
||||
|
||||
```console
|
||||
helm upgrade prometheus-operator prometheus-community/kube-prometheus-stack -n monitoring --reuse-values --set nameOverride=prometheus-operator
|
||||
```
|
||||
|
||||
**Note**: It is recommended to run this first with `--dry-run --debug`.
|
||||
|
||||
## Redeploy with new name (downtime)
|
||||
|
||||
If the **prometheus-operator** values are compatible with the new **kube-prometheus-stack** chart, please follow the below steps for migration:
|
||||
|
||||
> The guide presumes that the chart is deployed in the `monitoring` namespace and the deployments are running there. If deployed in another namespace, please replace `monitoring` with the deployed namespace.
|
||||
|
||||
1. Patch the PersistentVolume created/used by the prometheus-operator chart to the `Retain` claim policy:
|
||||
|
||||
```console
|
||||
kubectl patch pv/<PersistentVolume name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
|
||||
```
|
||||
|
||||
**Note:** To execute the above command, the user must have cluster-wide permissions. Please refer to [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
|
||||
|
||||
2. Uninstall the **prometheus-operator** release, delete the existing PersistentVolumeClaim, and verify that the PV becomes Released.
|
||||
|
||||
```console
|
||||
helm uninstall prometheus-operator -n monitoring
|
||||
kubectl delete pvc/<PersistenceVolumeClaim name> -n monitoring
|
||||
```
|
||||
|
||||
Additionally, you have to manually remove the remaining `prometheus-operator-kubelet` service.
|
||||
|
||||
```console
|
||||
kubectl delete service/prometheus-operator-kubelet -n kube-system
|
||||
```
|
||||
|
||||
You can choose to remove all your existing CRDs (ServiceMonitors, Podmonitors, etc.) if you want to.
|
||||
|
||||
3. Remove current `spec.claimRef` values to change the PV's status from Released to Available.
|
||||
|
||||
```console
|
||||
kubectl patch pv/<PersistentVolume name> --type json -p='[{"op": "remove", "path": "/spec/claimRef"}]' -n monitoring
|
||||
```
|
||||
|
||||
**Note:** To execute the above command, the user must have a cluster wide permission. Please refer to [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
|
||||
|
||||
After these steps, proceed to a fresh **kube-prometheus-stack** installation and make sure the current release of **kube-prometheus-stack** matches the `volumeClaimTemplate` values in the `values.yaml`.
|
||||
|
||||
The binding is done via matching a specific amount of storage requested and with certain access modes.
|
||||
|
||||
For example, if you had storage specified as this with **prometheus-operator**:
|
||||
|
||||
```yaml
|
||||
volumeClaimTemplate:
|
||||
spec:
|
||||
storageClassName: gp2
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: 50Gi
|
||||
```
|
||||
|
||||
You have to specify matching `volumeClaimTemplate` with 50Gi storage and `ReadWriteOnce` access mode.
|
||||
|
||||
Additionally, you should check the current AZ of your legacy installation's PV, and configure the fresh release to use the same AZ as the old one. If the pods are in a different AZ than the PV, the release will fail to bind the existing one, hence creating a new PV.
|
||||
|
||||
This can be achieved either by specifying the labels through `values.yaml`, e.g. setting `prometheus.prometheusSpec.nodeSelector` to:
|
||||
|
||||
```yaml
|
||||
nodeSelector:
|
||||
failure-domain.beta.kubernetes.io/zone: east-west-1a
|
||||
```
|
||||
|
||||
or passing these values as `--set` overrides during installation.
|
||||
|
||||
The new release should now re-attach your previously released PV with its content.
|
||||
|
||||
## Migrating from coreos/prometheus-operator chart
|
||||
|
||||
The multiple charts have been combined into a single chart that installs prometheus operator, prometheus, alertmanager, grafana as well as the multitude of exporters necessary to monitor a cluster.
|
||||
|
||||
There is no simple and direct migration path between the charts as the changes are extensive and intended to make the chart easier to support.
|
||||
|
||||
The capabilities of the old chart are all available in the new chart, including the ability to run multiple prometheus instances on a single cluster - you will need to disable the parts of the chart you do not wish to deploy.
|
||||
|
||||
You can check out the tickets for this change [here](https://github.com/prometheus-operator/prometheus-operator/issues/592) and [here](https://github.com/helm/charts/pull/6765).
|
||||
|
||||
### High-level overview of Changes
|
||||
|
||||
#### Added dependencies
|
||||
|
||||
The chart has added 3 [dependencies](#dependencies).
|
||||
|
||||
- Node-Exporter, Kube-State-Metrics: These components are loaded as dependencies into the chart, and are relatively simple components
|
||||
- Grafana: The Grafana chart is more feature-rich than this chart - it contains a sidecar that is able to load data sources and dashboards from configmaps deployed into the same cluster. For more information check out the [documentation for the chart](https://github.com/grafana/helm-charts/blob/main/charts/grafana/README.md)
|
||||
|
||||
#### Kubelet Service
|
||||
|
||||
Because the kubelet service has a new name in the chart, make sure to clean up the old kubelet service in the `kube-system` namespace to prevent counting container metrics twice.
|
||||
|
||||
#### Persistent Volumes
|
||||
|
||||
If you would like to keep the data of the current persistent volumes, it should be possible to attach existing volumes to new PVCs and PVs that are created using the conventions in the new chart. For example, in order to use an existing Azure disk for a helm release called `prometheus-migration` the following resources can be created:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: pvc-prometheus-migration-prometheus-0
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
azureDisk:
|
||||
cachingMode: None
|
||||
diskName: pvc-prometheus-migration-prometheus-0
|
||||
diskURI: /subscriptions/f5125d82-2622-4c50-8d25-3f7ba3e9ac4b/resourceGroups/sample-migration-resource-group/providers/Microsoft.Compute/disks/pvc-prometheus-migration-prometheus-0
|
||||
fsType: ""
|
||||
kind: Managed
|
||||
readOnly: false
|
||||
capacity:
|
||||
storage: 1Gi
|
||||
persistentVolumeReclaimPolicy: Delete
|
||||
storageClassName: prometheus
|
||||
volumeMode: Filesystem
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: prometheus
|
||||
prometheus: prometheus-migration-prometheus
|
||||
name: prometheus-prometheus-migration-prometheus-db-prometheus-prometheus-migration-prometheus-0
|
||||
namespace: monitoring
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
storageClassName: prometheus
|
||||
volumeMode: Filesystem
|
||||
volumeName: pvc-prometheus-migration-prometheus-0
|
||||
```
|
||||
|
||||
The PVC will take ownership of the PV and when you create a release using a persistent volume claim template it will use the existing PVCs as they match the naming convention used by the chart. For other cloud providers similar approaches can be used.
|
||||
|
||||
#### KubeProxy
|
||||
|
||||
The metrics bind address of kube-proxy defaults to `127.0.0.1:10249`, which prometheus instances **cannot** access. You should expose metrics by changing the `metricsBindAddress` field value to `0.0.0.0:10249` if you want to collect them.
|
||||
|
||||
Depending on the cluster, the relevant part `config.conf` will be in ConfigMap `kube-system/kube-proxy` or `kube-system/kube-proxy-config`. For example:
|
||||
|
||||
```console
|
||||
kubectl -n kube-system edit cm kube-proxy
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
data:
|
||||
config.conf: |-
|
||||
apiVersion: kubeproxy.config.k8s.io/v1alpha1
|
||||
kind: KubeProxyConfiguration
|
||||
# ...
|
||||
# metricsBindAddress: 127.0.0.1:10249
|
||||
metricsBindAddress: 0.0.0.0:10249
|
||||
# ...
|
||||
kubeconfig.conf: |-
|
||||
# ...
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
app: kube-proxy
|
||||
name: kube-proxy
|
||||
namespace: kube-system
|
||||
```
|
||||
969
charts/kube-prometheus-stack/UPGRADE.md
Normal file
969
charts/kube-prometheus-stack/UPGRADE.md
Normal file
@ -0,0 +1,969 @@
|
||||
# Upgrade
|
||||
|
||||
## From 71.x to 72.x
|
||||
|
||||
This version adds an `enabled` flag to the `prometheusOperator.admissionWebhooks.deployment.podDisruptionBudget` settings. Users who want this chart to deploy a `podDisruptionBudget` must now set the flag `podDisruptionBudget.enabled` to `true` for each `podDisruptionBudget` resource to be created.
|
||||
|
||||
## From 70.x to 71.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.82.0
|
||||
Since [68.4.0](https://github.com/prometheus-community/helm-charts/pull/5175) it is also possible to use `crds.upgradeJob.enabled` for upgrading the CRDs.
|
||||
For traditional upgrades, please run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.82.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.82.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.82.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.82.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.82.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.82.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.82.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.82.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.82.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.82.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 69.x to 70.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.81.0, which now supports `serviceName` for the Prometheus/Alertmanager/ThanosRuler CRDs. If it is empty, there are no changes to the creation of the operated service. If it is defined, the operated service will not be created and the new `serviceName` will be used as the governing service.
|
||||
|
||||
Since [68.4.0](https://github.com/prometheus-community/helm-charts/pull/5175) it is also possible to use `crds.upgradeJob.enabled` for upgrading the CRDs.
|
||||
For traditional upgrades, please run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.81.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.81.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.81.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.81.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.81.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.81.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.81.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.81.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.81.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.81.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 68.x to 69.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.80.0
|
||||
Since [68.4.0](https://github.com/prometheus-community/helm-charts/pull/5175) it is also possible to use `crds.upgradeJob.enabled` for upgrading the CRDs.
|
||||
For traditional upgrades, please run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.80.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 67.x to 68.x
|
||||
|
||||
This version drops several metrics by default in order to reduce unnecessary cardinality.
|
||||
|
||||
This version also fixes histogram bucket matching for Prometheus 3.x.
|
||||
|
||||
From `{job="apiserver"}` drop excessive histogram buckets for the following metrics:
|
||||
|
||||
- `apiserver_request_sli_duration_seconds_bucket`
|
||||
- `apiserver_request_slo_duration_seconds_bucket`
|
||||
- `etcd_request_duration_seconds_bucket`
|
||||
|
||||
From `{job="kubelet",metrics_path="/metrics"}` reduce bucket cardinality of kubelet storage operations:
|
||||
|
||||
- `csi_operations_seconds_bucket`
|
||||
- `storage_operation_duration_seconds_bucket`
|
||||
|
||||
From `{job="kubelet",metrics_path="/metrics/cadvisor"}`:
|
||||
|
||||
- Drop `container_memory_failures_total{scope="hierarchy"}` metrics; we only need the container scope here.
|
||||
- Drop `container_network_...` metrics that match various interfaces corresponding to CNI and similar interfaces.
|
||||
|
||||
## From 66.x to 67.x
|
||||
|
||||
This version upgrades Prometheus Image to v3.0.1 as it is the default version starting with operator version v0.79.0
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.79.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.79.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.79.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.79.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.79.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.79.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.79.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.79.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.79.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.79.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 65.x to 66.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.78.1
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.78.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.78.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.78.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.78.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.78.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.78.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.78.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.78.1/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.78.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.78.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 64.x to 65.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.77.1
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.77.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.77.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.77.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.77.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.77.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.77.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.77.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.77.1/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.77.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.77.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 63.x to 64.x
|
||||
|
||||
v64 reverts the v63 release.
|
||||
|
||||
All changes mentioned in the v63 release notes must be reverted.
|
||||
|
||||
## From 62.x to 63.x
|
||||
|
||||
Simplify setting empty selectors, by deprecating `*SelectorNilUsesHelmValues` properties.
|
||||
Instead, setting `*Selector.matchLabels=null` will create an empty selector.
|
||||
|
||||
If you set one of the following properties to `false`, you will have to convert them:
|
||||
|
||||
- `prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues`
|
||||
- `prometheus.prometheusSpec.probeSelectorNilUsesHelmValues`
|
||||
- `prometheus.prometheusSpec.ruleSelectorNilUsesHelmValues`
|
||||
- `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues`
|
||||
- `prometheus.prometheusSpec.scrapeConfigSelectorNilUsesHelmValues`
|
||||
- `thanosRuler.thanosRulerSpec.ruleSelectorNilUsesHelmValues`
|
||||
|
||||
For example:
|
||||
|
||||
```yaml
|
||||
prometheus:
|
||||
prometheusSpec:
|
||||
scrapeConfigSelectorNilUsesHelmValues: false
|
||||
```
|
||||
|
||||
Becomes:
|
||||
|
||||
```yaml
|
||||
prometheus:
|
||||
prometheusSpec:
|
||||
scrapeConfigSelector:
|
||||
matchLabels: null
|
||||
```
|
||||
|
||||
Note that `externalPrefixNilUsesHelmValues` remains as is.
|
||||
|
||||
## From 61.x to 62.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.76.0
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.76.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.76.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.76.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.76.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.76.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.76.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.76.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.76.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.76.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.76.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 60.x to 61.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.75.0
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.75.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.75.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.75.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.75.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.75.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.75.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.75.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.75.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.75.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.75.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 59.x to 60.x
|
||||
|
||||
This version upgrades the Grafana chart to v8.0.x which introduces Grafana 11. This new major version of Grafana contains some breaking changes described in [Breaking changes in Grafana v11.0](https://grafana.com/docs/grafana/latest/breaking-changes/breaking-changes-v11-0/).
|
||||
|
||||
## From 58.x to 59.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.74.0
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.74.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.74.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.74.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.74.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.74.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.74.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.74.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.74.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.74.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.74.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 57.x to 58.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.73.0
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 56.x to 57.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.72.0
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 55.x to 56.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.71.0, Prometheus to 2.49.1
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 54.x to 55.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.70.0
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 53.x to 54.x
|
||||
|
||||
The Grafana Helm Chart has been bumped to version 7.
|
||||
|
||||
Please note Grafana Helm Chart [changelog](https://github.com/grafana/helm-charts/tree/main/charts/grafana#to-700).
|
||||
|
||||
## From 52.x to 53.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.69.1, Prometheus to 2.47.2.
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 51.x to 52.x
|
||||
|
||||
This includes the ability to select between using existing secrets or create new secret objects for various thanos config. The defaults have not changed but if you were setting:
|
||||
|
||||
- `thanosRuler.thanosRulerSpec.alertmanagersConfig` or
|
||||
- `thanosRuler.thanosRulerSpec.objectStorageConfig` or
|
||||
- `thanosRuler.thanosRulerSpec.queryConfig` or
|
||||
- `prometheus.prometheusSpec.thanos.objectStorageConfig`
|
||||
|
||||
you will need to set `existingSecret` or `secret` based on your requirements.
|
||||
|
||||
For instance, the `thanosRuler.thanosRulerSpec.alertmanagersConfig` used to be configured as follows:
|
||||
|
||||
```yaml
|
||||
thanosRuler:
|
||||
thanosRulerSpec:
|
||||
alertmanagersConfig:
|
||||
alertmanagers:
|
||||
- api_version: v2
|
||||
http_config:
|
||||
basic_auth:
|
||||
username: some_user
|
||||
password: some_pass
|
||||
static_configs:
|
||||
- alertmanager.thanos.io
|
||||
scheme: http
|
||||
timeout: 10s
|
||||
```
|
||||
|
||||
But it now moved to:
|
||||
|
||||
```yaml
|
||||
thanosRuler:
|
||||
thanosRulerSpec:
|
||||
alertmanagersConfig:
|
||||
secret:
|
||||
alertmanagers:
|
||||
- api_version: v2
|
||||
http_config:
|
||||
basic_auth:
|
||||
username: some_user
|
||||
password: some_pass
|
||||
static_configs:
|
||||
- alertmanager.thanos.io
|
||||
scheme: http
|
||||
timeout: 10s
|
||||
```
|
||||
|
||||
or the `thanosRuler.thanosRulerSpec.objectStorageConfig` used to be configured as follows:
|
||||
|
||||
```yaml
|
||||
thanosRuler:
|
||||
thanosRulerSpec:
|
||||
objectStorageConfig:
|
||||
name: existing-secret-not-created-by-this-chart
|
||||
key: object-storage-configs.yaml
|
||||
```
|
||||
|
||||
But it now moved to:
|
||||
|
||||
```yaml
|
||||
thanosRuler:
|
||||
thanosRulerSpec:
|
||||
objectStorageConfig:
|
||||
existingSecret:
|
||||
name: existing-secret-not-created-by-this-chart
|
||||
key: object-storage-configs.yaml
|
||||
```
|
||||
|
||||
## From 50.x to 51.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.68.0, Prometheus to 2.47.0 and Thanos to v0.32.2.
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 49.x to 50.x
|
||||
|
||||
This version requires Kubernetes 1.19+.
|
||||
|
||||
We do not expect any breaking changes in this version.
|
||||
|
||||
## From 48.x to 49.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.67.1, Alertmanager to v0.26.0, Prometheus to 2.46.0 and Thanos to v0.32.0.
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 47.x to 48.x
|
||||
|
||||
This version moved all CRDs into a dedicated sub-chart. No new CRDs are introduced in this version.
|
||||
See [#3548](https://github.com/prometheus-community/helm-charts/issues/3548) for more context.
|
||||
|
||||
We do not expect any breaking changes in this version.
|
||||
|
||||
## From 46.x to 47.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.66.0 with new CRDs (PrometheusAgent and ScrapeConfig).
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 45.x to 46.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.65.1 with new CRDs (PrometheusAgent and ScrapeConfig), Prometheus to v2.44.0 and Thanos to v0.31.0.
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 44.x to 45.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.63.0, Prometheus to v2.42.0 and Thanos to v0.30.2.
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 43.x to 44.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.62.0, Prometheus to v2.41.0 and Thanos to v0.30.1.
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
If you have explicitly set `prometheusOperator.admissionWebhooks.failurePolicy`, this value is now always used even when `.prometheusOperator.admissionWebhooks.patch.enabled` is `true` (the default).
|
||||
|
||||
The values for `prometheusOperator.image.tag` & `prometheusOperator.prometheusConfigReloader.image.tag` are now empty by default and the Chart.yaml `appVersion` field is used instead.
|
||||
|
||||
## From 42.x to 43.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.61.1, Prometheus to v2.40.5 and Thanos to v0.29.0.
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 41.x to 42.x
|
||||
|
||||
This includes the overridability of container registry for all containers at the global level using `global.imageRegistry` or per container image. The defaults have not changed but if you were using a custom image, you will have to override the registry of said custom container image before you upgrade.
|
||||
|
||||
For instance, the prometheus-config-reloader used to be configured as follows:
|
||||
|
||||
```yaml
|
||||
image:
|
||||
repository: quay.io/prometheus-operator/prometheus-config-reloader
|
||||
tag: v0.60.1
|
||||
sha: ""
|
||||
```
|
||||
|
||||
But it now moved to:
|
||||
|
||||
```yaml
|
||||
image:
|
||||
registry: quay.io
|
||||
repository: prometheus-operator/prometheus-config-reloader
|
||||
tag: v0.60.1
|
||||
sha: ""
|
||||
```
|
||||
|
||||
## From 40.x to 41.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.60.1, Prometheus to v2.39.1 and Thanos to v0.28.1.
|
||||
This version also upgrades the Helm charts of kube-state-metrics to 4.20.2, prometheus-node-exporter to 4.3.0 and Grafana to 6.40.4.
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
This version splits kubeScheduler recording and alerting rules in separate config values.
|
||||
Instead of `defaultRules.rules.kubeScheduler` the 2 new variables `defaultRules.rules.kubeSchedulerAlerting` and `defaultRules.rules.kubeSchedulerRecording` are used.
|
||||
|
||||
## From 39.x to 40.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.59.1, Prometheus to v2.38.0, kube-state-metrics to v2.6.0 and Thanos to v0.28.0.
|
||||
This version also upgrades the Helm charts of kube-state-metrics to 4.18.0 and prometheus-node-exporter to 4.2.0.
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
Starting from prometheus-node-exporter version 4.0.0, the `node exporter` chart is using the [Kubernetes recommended labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/). Therefore you have to delete the daemonset before you upgrade.
|
||||
|
||||
```console
|
||||
kubectl delete daemonset -l app=prometheus-node-exporter
|
||||
helm upgrade -i kube-prometheus-stack prometheus-community/kube-prometheus-stack
|
||||
```
|
||||
|
||||
If you use your own custom [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor) or [PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#podmonitor), please ensure to upgrade their `selector` fields accordingly to the new labels.
|
||||
|
||||
## From 38.x to 39.x
|
||||
|
||||
This upgraded prometheus-operator to v0.58.0 and prometheus to v2.37.0
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 37.x to 38.x
|
||||
|
||||
Reverted one of the default metrics relabelings for cAdvisor added in 36.x, due to it breaking container_network_* and various other statistics. If you do not want this change, you will need to override the `kubelet.cAdvisorMetricRelabelings`.
|
||||
|
||||
## From 36.x to 37.x
|
||||
|
||||
This includes some default metric relabelings for cAdvisor and apiserver metrics to reduce cardinality. If you do not want these defaults, you will need to override the `kubeApiServer.metricRelabelings` and or `kubelet.cAdvisorMetricRelabelings`.
|
||||
|
||||
## From 35.x to 36.x
|
||||
|
||||
This upgraded prometheus-operator to v0.57.0 and prometheus to v2.36.1
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 34.x to 35.x
|
||||
|
||||
This upgraded prometheus-operator to v0.56.0 and prometheus to v2.35.0
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 33.x to 34.x
|
||||
|
||||
This upgrades to prometheus-operator to v0.55.0 and prometheus to v2.33.5.
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 32.x to 33.x
|
||||
|
||||
This upgrades the prometheus-node-exporter Chart to v3.0.0. Please review the changes to this subchart if you make customizations to hostMountPropagation.
|
||||
|
||||
## From 31.x to 32.x
|
||||
|
||||
This upgrades prometheus-operator to v0.54.0 and prometheus to v2.33.1. It also changes the default for `grafana.serviceMonitor.enabled` to `true`.
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 30.x to 31.x
|
||||
|
||||
This version removes the built-in grafana ServiceMonitor and instead relies on the ServiceMonitor of the sub-chart.
|
||||
`grafana.serviceMonitor.enabled` must be set instead of `grafana.serviceMonitor.selfMonitor` and the old ServiceMonitor may
|
||||
need to be manually cleaned up after deploying the new release.
|
||||
|
||||
## From 29.x to 30.x
|
||||
|
||||
This version updates kube-state-metrics to 4.3.0 and uses the new option `kube-state-metrics.releaseLabel=true` which adds the "release" label to kube-state-metrics labels, making scraping of the metrics by kube-prometheus-stack work out of the box again, independent of the used kube-prometheus-stack release name. If you already set the "release" label via `kube-state-metrics.customLabels` you might have to remove that and use it via the new option.
|
||||
|
||||
## From 28.x to 29.x
|
||||
|
||||
This version makes scraping port for kube-controller-manager and kube-scheduler dynamic to reflect changes to default serving ports
|
||||
for those components in Kubernetes versions v1.22 and v1.23 respectively.
|
||||
|
||||
If you deploy on clusters using version v1.22+, kube-controller-manager will be scraped over HTTPS on port 10257.
|
||||
|
||||
If you deploy on clusters running version v1.23+, kube-scheduler will be scraped over HTTPS on port 10259.
|
||||
|
||||
## From 27.x to 28.x
|
||||
|
||||
This version disables PodSecurityPolicies by default because they are deprecated in Kubernetes 1.21 and will be removed in Kubernetes 1.25.
|
||||
|
||||
If you are using PodSecurityPolicies you can enable the previous behaviour by setting `kube-state-metrics.podSecurityPolicy.enabled`, `prometheus-node-exporter.rbac.pspEnabled`, `grafana.rbac.pspEnabled` and `global.rbac.pspEnabled` to `true`.
|
||||
|
||||
## From 26.x to 27.x
|
||||
|
||||
This version splits the prometheus-node-exporter chart recording and alerting rules into separate config values.
|
||||
Instead of `defaultRules.rules.node` the 2 new variables `defaultRules.rules.nodeExporterAlerting` and `defaultRules.rules.nodeExporterRecording` are used.
|
||||
|
||||
Also the following defaultRules.rules has been removed as they had no effect: `kubeApiserverError`, `kubePrometheusNodeAlerting`, `kubernetesAbsent`, `time`.
|
||||
|
||||
The ability to set a runbookUrl via `defaultRules.rules.runbookUrl` was reintroduced.
|
||||
|
||||
## From 25.x to 26.x
|
||||
|
||||
This version enables the prometheus-node-exporter subchart servicemonitor by default again, by setting `prometheus-node-exporter.prometheus.monitor.enabled` to `true`.
|
||||
|
||||
## From 24.x to 25.x
|
||||
|
||||
This version upgrades to prometheus-operator v0.53.1. It removes support for setting a runbookUrl, since the upstream format for runbooks changed.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 23.x to 24.x
|
||||
|
||||
The custom `ServiceMonitor` for the _kube-state-metrics_ & _prometheus-node-exporter_ charts have been removed in favour of the built-in sub-chart `ServiceMonitor`; for both sub-charts this means that `ServiceMonitor` customisations happen via the values passed to the chart. If you haven't directly customised this behaviour then there are no changes required to upgrade, but if you have please read the following.
|
||||
|
||||
For _kube-state-metrics_ the `ServiceMonitor` customisation is now set via `kube-state-metrics.prometheus.monitor` and the `kubeStateMetrics.serviceMonitor.selfMonitor.enabled` value has moved to `kube-state-metrics.selfMonitor.enabled`.
|
||||
|
||||
For _prometheus-node-exporter_ the `ServiceMonitor` customisation is now set via `prometheus-node-exporter.prometheus.monitor` and the `nodeExporter.jobLabel` values has moved to `prometheus-node-exporter.prometheus.monitor.jobLabel`.
|
||||
|
||||
## From 22.x to 23.x
|
||||
|
||||
Port names have been renamed for Istio's
|
||||
[explicit protocol selection](https://istio.io/latest/docs/ops/configuration/traffic-management/protocol-selection/#explicit-protocol-selection).
|
||||
|
||||
| | old value | new value |
|
||||
|-|-----------|-----------|
|
||||
| `alertmanager.alertmanagerSpec.portName` | `web` | `http-web` |
|
||||
| `grafana.service.portName` | `service` | `http-web` |
|
||||
| `prometheus-node-exporter.service.portName` | `metrics` (hardcoded) | `http-metrics` |
|
||||
| `prometheus.prometheusSpec.portName` | `web` | `http-web` |
|
||||
|
||||
## From 21.x to 22.x
|
||||
|
||||
Due to the upgrade of the `kube-state-metrics` chart, removal of its deployment/statefulset needs to be done manually prior to upgrading:
|
||||
|
||||
```console
|
||||
kubectl delete deployments.apps -l app.kubernetes.io/instance=prometheus-operator,app.kubernetes.io/name=kube-state-metrics --cascade=orphan
|
||||
```
|
||||
|
||||
or if you use autosharding:
|
||||
|
||||
```console
|
||||
kubectl delete statefulsets.apps -l app.kubernetes.io/instance=prometheus-operator,app.kubernetes.io/name=kube-state-metrics --cascade=orphan
|
||||
```
|
||||
|
||||
## From 20.x to 21.x
|
||||
|
||||
The config reloader values have been refactored. All the values have been moved to the key `prometheusConfigReloader` and the limits and requests can now be set separately.
|
||||
|
||||
## From 19.x to 20.x
|
||||
|
||||
Version 20 upgrades prometheus-operator from 0.50.x to 0.52.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating:
|
||||
|
||||
```console
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 18.x to 19.x
|
||||
|
||||
`kubeStateMetrics.serviceMonitor.namespaceOverride` was removed.
|
||||
Please use `kube-state-metrics.namespaceOverride` instead.
|
||||
|
||||
## From 17.x to 18.x
|
||||
|
||||
Version 18 upgrades prometheus-operator from 0.49.x to 0.50.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating:
|
||||
|
||||
```console
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 16.x to 17.x
|
||||
|
||||
Version 17 upgrades prometheus-operator from 0.48.x to 0.49.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating:
|
||||
|
||||
```console
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 15.x to 16.x
|
||||
|
||||
Version 16 upgrades kube-state-metrics to v2.0.0. This includes changed command-line arguments and removed metrics, see this [blog post](https://kubernetes.io/blog/2021/04/13/kube-state-metrics-v-2-0/). This version also removes Grafana dashboards that supported Kubernetes 1.14 or earlier.
|
||||
|
||||
## From 14.x to 15.x
|
||||
|
||||
Version 15 upgrades prometheus-operator from 0.46.x to 0.47.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating:
|
||||
|
||||
```console
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 13.x to 14.x
|
||||
|
||||
Version 14 upgrades prometheus-operator from 0.45.x to 0.46.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating:
|
||||
|
||||
```console
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
## From 12.x to 13.x
|
||||
|
||||
Version 13 upgrades prometheus-operator from 0.44.x to 0.45.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating:
|
||||
|
||||
```console
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
```
|
||||
|
||||
## From 11.x to 12.x
|
||||
|
||||
Version 12 upgrades prometheus-operator from 0.43.x to 0.44.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating:
|
||||
|
||||
```console
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.44/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
```
|
||||
|
||||
The chart was migrated to support only helm v3 and later.
|
||||
|
||||
## From 10.x to 11.x
|
||||
|
||||
Version 11 upgrades prometheus-operator from 0.42.x to 0.43.x. Starting with 0.43.x an additional `AlertmanagerConfigs` CRD is introduced. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating:
|
||||
|
||||
```console
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.43/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
```
|
||||
|
||||
Version 11 removes the deprecated tlsProxy via ghostunnel in favor of native TLS support the prometheus-operator gained with v0.39.0.
|
||||
|
||||
## From 9.x to 10.x
|
||||
|
||||
Version 10 upgrades prometheus-operator from 0.38.x to 0.42.x. Starting with 0.40.x an additional `Probes` CRD is introduced. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating:
|
||||
|
||||
```console
|
||||
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.42/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
```
|
||||
|
||||
## From 8.x to 9.x
|
||||
|
||||
Version 9 of the helm chart removes the existing `additionalScrapeConfigsExternal` in favour of `additionalScrapeConfigsSecret`. This change lets users specify the secret name and secret key to use for the additional scrape configuration of prometheus. This is useful for users that have prometheus-operator as a subchart and also have a template that creates the additional scrape configuration.
|
||||
|
||||
## From 7.x to 8.x
|
||||
|
||||
Due to new template functions being used in the rules in version 8.x.x of the chart, an upgrade to Prometheus Operator and Prometheus is necessary in order to support them. First, upgrade to the latest version of 7.x.x
|
||||
|
||||
```console
|
||||
helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version 7.5.0
|
||||
```
|
||||
|
||||
Then upgrade to 8.x.x
|
||||
|
||||
```console
|
||||
helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version [8.x.x]
|
||||
```
|
||||
|
||||
Minimal recommended Prometheus version for this chart release is `2.12.x`
|
||||
|
||||
## From 6.x to 7.x
|
||||
|
||||
Due to a change in grafana subchart, version 7.x.x now requires Helm >= 2.12.0.
|
||||
|
||||
## From 5.x to 6.x
|
||||
|
||||
Due to a change in deployment labels of kube-state-metrics, the upgrade requires `helm upgrade --force` in order to re-create the deployment. If this is not done an error will occur indicating that the deployment cannot be modified:
|
||||
|
||||
```console
|
||||
invalid: spec.selector: Invalid value: v1.LabelSelector{MatchLabels:map[string]string{"app.kubernetes.io/name":"kube-state-metrics"}, MatchExpressions:[]v1.LabelSelectorRequirement(nil)}: field is immutable
|
||||
```
|
||||
|
||||
If this error has already been encountered, a `helm history` command can be used to determine which release has worked, then `helm rollback` to the release, then `helm upgrade --force` to this new one
|
||||
3
charts/kube-prometheus-stack/charts/crds/Chart.yaml
Normal file
3
charts/kube-prometheus-stack/charts/crds/Chart.yaml
Normal file
@ -0,0 +1,3 @@
|
||||
apiVersion: v2
|
||||
name: crds
|
||||
version: 0.0.0
|
||||
3
charts/kube-prometheus-stack/charts/crds/README.md
Normal file
3
charts/kube-prometheus-stack/charts/crds/README.md
Normal file
@ -0,0 +1,3 @@
|
||||
# crds subchart
|
||||
|
||||
See: [https://github.com/prometheus-community/helm-charts/issues/3548](https://github.com/prometheus-community/helm-charts/issues/3548)
|
||||
File diff suppressed because it is too large
Load Diff
9165
charts/kube-prometheus-stack/charts/crds/crds/crd-alertmanagers.yaml
Normal file
9165
charts/kube-prometheus-stack/charts/crds/crds/crd-alertmanagers.yaml
Normal file
File diff suppressed because it is too large
Load Diff
1213
charts/kube-prometheus-stack/charts/crds/crds/crd-podmonitors.yaml
Normal file
1213
charts/kube-prometheus-stack/charts/crds/crds/crd-podmonitors.yaml
Normal file
File diff suppressed because it is too large
Load Diff
1180
charts/kube-prometheus-stack/charts/crds/crds/crd-probes.yaml
Normal file
1180
charts/kube-prometheus-stack/charts/crds/crds/crd-probes.yaml
Normal file
File diff suppressed because it is too large
Load Diff
10856
charts/kube-prometheus-stack/charts/crds/crds/crd-prometheusagents.yaml
Normal file
10856
charts/kube-prometheus-stack/charts/crds/crds/crd-prometheusagents.yaml
Normal file
File diff suppressed because it is too large
Load Diff
13122
charts/kube-prometheus-stack/charts/crds/crds/crd-prometheuses.yaml
Normal file
13122
charts/kube-prometheus-stack/charts/crds/crds/crd-prometheuses.yaml
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,160 @@
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.82.2/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.17.2
|
||||
operator.prometheus.io/version: 0.82.2
|
||||
name: prometheusrules.monitoring.coreos.com
|
||||
spec:
|
||||
group: monitoring.coreos.com
|
||||
names:
|
||||
categories:
|
||||
- prometheus-operator
|
||||
kind: PrometheusRule
|
||||
listKind: PrometheusRuleList
|
||||
plural: prometheusrules
|
||||
shortNames:
|
||||
- promrule
|
||||
singular: prometheusrule
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: |-
|
||||
The `PrometheusRule` custom resource definition (CRD) defines [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) and [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) rules to be evaluated by `Prometheus` or `ThanosRuler` objects.
|
||||
|
||||
`Prometheus` and `ThanosRuler` objects select `PrometheusRule` objects using label and namespace selectors.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: Specification of desired alerting rule definitions for Prometheus.
|
||||
properties:
|
||||
groups:
|
||||
description: Content of Prometheus rule file
|
||||
items:
|
||||
description: RuleGroup is a list of sequentially evaluated recording
|
||||
and alerting rules.
|
||||
properties:
|
||||
interval:
|
||||
description: Interval determines how often rules in the group
|
||||
are evaluated.
|
||||
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
|
||||
type: string
|
||||
labels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Labels to add or overwrite before storing the result for its rules.
|
||||
The labels defined at the rule level take precedence.
|
||||
|
||||
It requires Prometheus >= 3.0.0.
|
||||
The field is ignored for Thanos Ruler.
|
||||
type: object
|
||||
limit:
|
||||
description: |-
|
||||
Limit the number of alerts an alerting rule and series a recording
|
||||
rule can produce.
|
||||
Limit is supported starting with Prometheus >= 2.31 and Thanos Ruler >= 0.24.
|
||||
type: integer
|
||||
name:
|
||||
description: Name of the rule group.
|
||||
minLength: 1
|
||||
type: string
|
||||
partial_response_strategy:
|
||||
description: |-
|
||||
PartialResponseStrategy is only used by ThanosRuler and will
|
||||
be ignored by Prometheus instances.
|
||||
More info: https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md#partial-response
|
||||
pattern: ^(?i)(abort|warn)?$
|
||||
type: string
|
||||
query_offset:
|
||||
description: |-
|
||||
Defines the offset the rule evaluation timestamp of this particular group by the specified duration into the past.
|
||||
|
||||
It requires Prometheus >= v2.53.0.
|
||||
It is not supported for ThanosRuler.
|
||||
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
|
||||
type: string
|
||||
rules:
|
||||
description: List of alerting and recording rules.
|
||||
items:
|
||||
description: |-
|
||||
Rule describes an alerting or recording rule
|
||||
See Prometheus documentation: [alerting](https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) or [recording](https://www.prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules) rule
|
||||
properties:
|
||||
alert:
|
||||
description: |-
|
||||
Name of the alert. Must be a valid label value.
|
||||
Only one of `record` and `alert` must be set.
|
||||
type: string
|
||||
annotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Annotations to add to each alert.
|
||||
Only valid for alerting rules.
|
||||
type: object
|
||||
expr:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: PromQL expression to evaluate.
|
||||
x-kubernetes-int-or-string: true
|
||||
for:
|
||||
description: Alerts are considered firing once they have
|
||||
been returned for this long.
|
||||
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
|
||||
type: string
|
||||
keep_firing_for:
|
||||
description: KeepFiringFor defines how long an alert will
|
||||
continue firing after the condition that triggered it
|
||||
has cleared.
|
||||
minLength: 1
|
||||
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
|
||||
type: string
|
||||
labels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Labels to add or overwrite.
|
||||
type: object
|
||||
record:
|
||||
description: |-
|
||||
Name of the time series to output to. Must be a valid metric name.
|
||||
Only one of `record` and `alert` must be set.
|
||||
type: string
|
||||
required:
|
||||
- expr
|
||||
type: object
|
||||
type: array
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-map-keys:
|
||||
- name
|
||||
x-kubernetes-list-type: map
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
12520
charts/kube-prometheus-stack/charts/crds/crds/crd-scrapeconfigs.yaml
Normal file
12520
charts/kube-prometheus-stack/charts/crds/crds/crd-scrapeconfigs.yaml
Normal file
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
9267
charts/kube-prometheus-stack/charts/crds/crds/crd-thanosrulers.yaml
Normal file
9267
charts/kube-prometheus-stack/charts/crds/crds/crd-thanosrulers.yaml
Normal file
File diff suppressed because it is too large
Load Diff
BIN
charts/kube-prometheus-stack/charts/crds/files/crds.bz2
Normal file
BIN
charts/kube-prometheus-stack/charts/crds/files/crds.bz2
Normal file
Binary file not shown.
@ -0,0 +1,20 @@
|
||||
{{/* Shortened name suffixed with upgrade-crd */}}
|
||||
{{- define "kube-prometheus-stack.crd.upgradeJob.name" -}}
|
||||
{{- print (include "kube-prometheus-stack.fullname" .) "-upgrade" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "kube-prometheus-stack.crd.upgradeJob.labels" -}}
|
||||
{{- include "kube-prometheus-stack.labels" . }}
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-operator
|
||||
app.kubernetes.io/name: {{ template "kube-prometheus-stack.name" . }}-prometheus-operator
|
||||
app.kubernetes.io/component: crds-upgrade
|
||||
{{- end -}}
|
||||
|
||||
{{/* Create the name of crd.upgradeJob service account to use */}}
|
||||
{{- define "kube-prometheus-stack.crd.upgradeJob.serviceAccountName" -}}
|
||||
{{- if .Values.upgradeJob.serviceAccount.create -}}
|
||||
{{ default (include "kube-prometheus-stack.crd.upgradeJob.name" .) .Values.upgradeJob.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.upgradeJob.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
@ -0,0 +1,28 @@
|
||||
{{- if .Values.upgradeJob.enabled }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.crd.upgradeJob.name" . }}
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install,pre-upgrade,pre-rollback
|
||||
"helm.sh/hook-weight": "-5"
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
labels:
|
||||
{{- include "kube-prometheus-stack.crd.upgradeJob.labels" . | nindent 4 }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- "apiextensions.k8s.io"
|
||||
resources:
|
||||
- "customresourcedefinitions"
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- update
|
||||
- get
|
||||
- list
|
||||
resourceNames:
|
||||
{{- range $path, $_ := $.Files.Glob "crds/*.yaml" }}
|
||||
- {{ ($.Files.Get $path | fromYaml ).metadata.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@ -0,0 +1,21 @@
|
||||
{{- if .Values.upgradeJob.enabled }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.crd.upgradeJob.name" . }}
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install,pre-upgrade,pre-rollback
|
||||
"helm.sh/hook-weight": "-3"
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
labels:
|
||||
{{- include "kube-prometheus-stack.crd.upgradeJob.labels" . | nindent 4 }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
name: {{ template "kube-prometheus-stack.crd.upgradeJob.serviceAccountName" . }}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: {{ template "kube-prometheus-stack.crd.upgradeJob.name" . }}
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end }}
|
||||
@ -0,0 +1,15 @@
|
||||
{{- if .Values.upgradeJob.enabled }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "kube-prometheus-stack.crd.upgradeJob.serviceAccountName" . }}
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install,pre-upgrade,pre-rollback
|
||||
"helm.sh/hook-weight": "-2"
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
labels:
|
||||
{{- include "kube-prometheus-stack.crd.upgradeJob.labels" . | nindent 4 }}
|
||||
binaryData:
|
||||
crds.bz2: {{ .Files.Get "files/crds.bz2" | b64enc }}
|
||||
{{- end }}
|
||||
@ -0,0 +1,146 @@
|
||||
{{- if .Values.upgradeJob.enabled }}
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.crd.upgradeJob.name" . }}
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install,pre-upgrade,pre-rollback
|
||||
"helm.sh/hook-weight": "5"
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
{{- with .Values.upgradeJob.annotations }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "kube-prometheus-stack.crd.upgradeJob.labels" . | nindent 4 }}
|
||||
{{- with .Values.upgradeJob.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
backoffLimit: 3
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.upgradeJob.podLabels }}
|
||||
labels:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.upgradeJob.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.global.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- include "kube-prometheus-stack.imagePullSecrets" . | indent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "kube-prometheus-stack.crd.upgradeJob.serviceAccountName" . }}
|
||||
initContainers:
|
||||
- name: busybox
|
||||
{{- $busyboxRegistry := .Values.global.imageRegistry | default .Values.upgradeJob.image.busybox.registry -}}
|
||||
{{- if .Values.upgradeJob.image.sha }}
|
||||
image: "{{ $busyboxRegistry }}/{{ .Values.upgradeJob.image.busybox.repository }}:{{ .Values.upgradeJob.image.busybox.tag }}@sha256:{{ .Values.upgradeJob.image.busybox.sha }}"
|
||||
{{- else }}
|
||||
image: "{{ $busyboxRegistry }}/{{ .Values.upgradeJob.image.busybox.repository }}:{{ .Values.upgradeJob.image.busybox.tag }}"
|
||||
{{- end }}
|
||||
imagePullPolicy: "{{ .Values.upgradeJob.image.busybox.pullPolicy }}"
|
||||
workingDir: /tmp/
|
||||
command:
|
||||
- sh
|
||||
args:
|
||||
- -c
|
||||
- bzcat /crds/crds.bz2 > /tmp/crds.yaml
|
||||
{{- with .Values.upgradeJob.resources }}
|
||||
resources:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.upgradeJob.containerSecurityContext }}
|
||||
securityContext:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: /crds/
|
||||
name: crds
|
||||
- mountPath: /tmp/
|
||||
name: tmp
|
||||
{{- with .Values.upgradeJob.extraVolumeMounts }}
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.upgradeJob.env }}
|
||||
env:
|
||||
{{- range $key, $value := . }}
|
||||
- name: {{ $key }}
|
||||
value: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: kubectl
|
||||
{{- $kubectlRegistry := .Values.global.imageRegistry | default .Values.upgradeJob.image.kubectl.registry -}}
|
||||
{{- $defaultKubernetesVersion := regexFind "v\\d+\\.\\d+\\.\\d+" .Capabilities.KubeVersion.Version }}
|
||||
{{- if .Values.upgradeJob.image.kubectl.sha }}
|
||||
image: "{{ $kubectlRegistry }}/{{ .Values.upgradeJob.image.kubectl.repository }}:{{ .Values.upgradeJob.image.kubectl.tag | default $defaultKubernetesVersion }}@sha256:{{ .Values.upgradeJob.image.kubectl.sha }}"
|
||||
{{- else }}
|
||||
image: "{{ $kubectlRegistry }}/{{ .Values.upgradeJob.image.kubectl.repository }}:{{ .Values.upgradeJob.image.kubectl.tag | default $defaultKubernetesVersion }}"
|
||||
{{- end }}
|
||||
imagePullPolicy: "{{ .Values.upgradeJob.image.kubectl.pullPolicy }}"
|
||||
command:
|
||||
- kubectl
|
||||
args:
|
||||
- apply
|
||||
- --server-side
|
||||
{{- if .Values.upgradeJob.forceConflicts }}
|
||||
- --force-conflicts
|
||||
{{- end }}
|
||||
- --filename
|
||||
- /tmp/crds.yaml
|
||||
{{- with .Values.upgradeJob.resources }}
|
||||
resources:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.upgradeJob.containerSecurityContext }}
|
||||
securityContext:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: /tmp/
|
||||
name: tmp
|
||||
{{- with .Values.upgradeJob.extraVolumeMounts }}
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.upgradeJob.env }}
|
||||
env:
|
||||
{{- range $key, $value := . }}
|
||||
- name: {{ $key }}
|
||||
value: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: tmp
|
||||
emptyDir: {}
|
||||
- name: crds
|
||||
configMap:
|
||||
name: {{ template "kube-prometheus-stack.crd.upgradeJob.name" . }}
|
||||
{{- with .Values.upgradeJob.extraVolumes }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
restartPolicy: OnFailure
|
||||
{{- with .Values.upgradeJob.podSecurityContext }}
|
||||
securityContext:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.upgradeJob.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.upgradeJob.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.upgradeJob.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.upgradeJob.topologySpreadConstraints }}
|
||||
topologySpreadConstraints:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@ -0,0 +1,20 @@
|
||||
{{- if and .Values.upgradeJob.enabled .Values.upgradeJob.serviceAccount.create }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
automountServiceAccountToken: {{ .Values.upgradeJob.serviceAccount.automountServiceAccountToken }}
|
||||
metadata:
|
||||
name: {{ include "kube-prometheus-stack.crd.upgradeJob.serviceAccountName" . }}
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install,pre-upgrade,pre-rollback
|
||||
"helm.sh/hook-weight": "-4"
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
{{- with .Values.upgradeJob.serviceAccount.annotations }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "kube-prometheus-stack.crd.upgradeJob.labels" . | nindent 4 }}
|
||||
{{- with .Values.upgradeJob.serviceAccount.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@ -0,0 +1,6 @@
|
||||
suite: test rendering
|
||||
tests:
|
||||
# 5307
|
||||
- it: should render when no values are set
|
||||
asserts:
|
||||
- notFailedTemplate: {}
|
||||
4
charts/kube-prometheus-stack/charts/crds/values.yaml
Normal file
4
charts/kube-prometheus-stack/charts/crds/values.yaml
Normal file
@ -0,0 +1,4 @@
|
||||
## Check out kube-prometheus-stack/values.yaml for more information
|
||||
## on this parameter
|
||||
upgradeJob:
|
||||
enabled: false
|
||||
@ -0,0 +1,41 @@
|
||||
alertmanager:
|
||||
enabled: false
|
||||
coreDns:
|
||||
enabled: false
|
||||
kubeApiServer:
|
||||
enabled: false
|
||||
kubeControllerManager:
|
||||
enabled: false
|
||||
kubeDns:
|
||||
enabled: false
|
||||
kubeEtcd:
|
||||
enabled: false
|
||||
kubeProxy:
|
||||
enabled: false
|
||||
kubeScheduler:
|
||||
enabled: false
|
||||
kubeStateMetrics:
|
||||
enabled: false
|
||||
kubelet:
|
||||
enabled: false
|
||||
nodeExporter:
|
||||
enabled: false
|
||||
grafana:
|
||||
enabled: false
|
||||
prometheus:
|
||||
enabled: false
|
||||
defaultRules:
|
||||
create: false
|
||||
# Default configuration of prometheus operator will create CRDs in the cluster idempotently
|
||||
prometheusOperator:
|
||||
enabled: true
|
||||
serviceMonitor:
|
||||
selfMonitor: false
|
||||
tls:
|
||||
enabled: false
|
||||
admissionWebhooks:
|
||||
enabled: false
|
||||
namespaces:
|
||||
releaseNamespace: true
|
||||
additional:
|
||||
- kube-system
|
||||
@ -0,0 +1,13 @@
|
||||
prometheusOperator:
|
||||
namespaces:
|
||||
releaseNamespace: true
|
||||
additional:
|
||||
- kube-system
|
||||
|
||||
prometheus-node-exporter:
|
||||
service:
|
||||
targetPort: 9101
|
||||
port: 9101
|
||||
|
||||
crds:
|
||||
enabled: false
|
||||
95
charts/kube-prometheus-stack/ci/03-non-defaults-values.yaml
Normal file
95
charts/kube-prometheus-stack/ci/03-non-defaults-values.yaml
Normal file
@ -0,0 +1,95 @@
|
||||
# this file tests some non default values to increase the test coverage
|
||||
|
||||
defaultRules:
|
||||
additionalRuleLabels:
|
||||
key: value
|
||||
additionalRuleGroupLabels:
|
||||
kubernetesSystem:
|
||||
key2: value2
|
||||
|
||||
prometheusOperator:
|
||||
denyNamespaces:
|
||||
- kube-system
|
||||
admissionWebhooks:
|
||||
namespaceSelector:
|
||||
matchLabels:
|
||||
key: value
|
||||
matchExpressions:
|
||||
- key: control-plane
|
||||
operator: NotIn
|
||||
values:
|
||||
- "true"
|
||||
extraArgs:
|
||||
- --labels="cluster=talos-cluster"
|
||||
|
||||
alertmanager:
|
||||
alertmanagerSpec:
|
||||
additionalConfig:
|
||||
logFormat: json
|
||||
additionalConfigString: |-
|
||||
logLevel: {{ print "debug" | quote }}
|
||||
|
||||
prometheus:
|
||||
prometheusSpec:
|
||||
additionalConfig:
|
||||
logFormat: json
|
||||
additionalConfigString: |-
|
||||
logLevel: {{ print "debug" | quote }}
|
||||
|
||||
customRules:
|
||||
AlertmanagerFailedReload:
|
||||
for: 3m
|
||||
AlertmanagerMembersInconsistent:
|
||||
for: 5m
|
||||
severity: "warning"
|
||||
|
||||
kubeControllerManager:
|
||||
service:
|
||||
enabled: false
|
||||
serviceMonitor:
|
||||
selector:
|
||||
matchLabels:
|
||||
component: kube-controller-manager
|
||||
coreDns:
|
||||
service:
|
||||
enabled: false
|
||||
serviceMonitor:
|
||||
port: metrics
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: '{{ $.Release.Name }}'
|
||||
kubeEtcd:
|
||||
service:
|
||||
enabled: false
|
||||
serviceMonitor:
|
||||
selector:
|
||||
matchLabels:
|
||||
component: etcd
|
||||
kubeScheduler:
|
||||
service:
|
||||
enabled: false
|
||||
serviceMonitor:
|
||||
selector:
|
||||
matchLabels:
|
||||
component: kube-scheduler
|
||||
kubeProxy:
|
||||
service:
|
||||
enabled: false
|
||||
serviceMonitor:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kube-proxy
|
||||
|
||||
grafana:
|
||||
sidecar:
|
||||
datasources:
|
||||
alertmanager:
|
||||
name: 0
|
||||
|
||||
nodeExporter:
|
||||
enabled: true
|
||||
forceDeployDashboards: true
|
||||
|
||||
prometheus-node-exporter:
|
||||
kubeRBACProxy:
|
||||
enabled: true
|
||||
@ -0,0 +1,8 @@
|
||||
prometheusOperator:
|
||||
admissionWebhooks:
|
||||
validatingWebhookConfiguration:
|
||||
annotations:
|
||||
test: test1
|
||||
test2: test3
|
||||
deployment:
|
||||
enabled: true
|
||||
@ -0,0 +1,90 @@
|
||||
alertmanager:
|
||||
ingress:
|
||||
enabled: true
|
||||
hosts:
|
||||
- "*.example.com"
|
||||
route:
|
||||
main:
|
||||
enabled: true
|
||||
hostnames:
|
||||
- "*.example.com"
|
||||
filters:
|
||||
- type: RequestHeaderModifier
|
||||
requestHeaderModifier:
|
||||
set:
|
||||
- name: my-header-name
|
||||
value: my-new-header-value
|
||||
additionalRules:
|
||||
- hostnames:
|
||||
- "foo.example.com"
|
||||
filters:
|
||||
- type: RequestHeaderModifier
|
||||
requestHeaderModifier:
|
||||
set:
|
||||
- name: my-header-name
|
||||
value: my-new-header-value
|
||||
matches:
|
||||
- path:
|
||||
type: PathPrefix
|
||||
value: /foo/
|
||||
|
||||
|
||||
prometheus:
|
||||
ingress:
|
||||
enabled: true
|
||||
hosts:
|
||||
- "*.example.com"
|
||||
route:
|
||||
main:
|
||||
enabled: true
|
||||
hostnames:
|
||||
- "*.example.com"
|
||||
filters:
|
||||
- type: RequestHeaderModifier
|
||||
requestHeaderModifier:
|
||||
set:
|
||||
- name: my-header-name
|
||||
value: my-new-header-value
|
||||
additionalRules:
|
||||
- hostnames:
|
||||
- "foo.example.com"
|
||||
filters:
|
||||
- type: RequestHeaderModifier
|
||||
requestHeaderModifier:
|
||||
set:
|
||||
- name: my-header-name
|
||||
value: my-new-header-value
|
||||
matches:
|
||||
- path:
|
||||
type: PathPrefix
|
||||
value: /foo/
|
||||
|
||||
thanosRuler:
|
||||
ingress:
|
||||
enabled: true
|
||||
hosts:
|
||||
- "*.example.com"
|
||||
route:
|
||||
main:
|
||||
enabled: true
|
||||
hostnames:
|
||||
- "*.example.com"
|
||||
filters:
|
||||
- type: RequestHeaderModifier
|
||||
requestHeaderModifier:
|
||||
set:
|
||||
- name: my-header-name
|
||||
value: my-new-header-value
|
||||
additionalRules:
|
||||
- hostnames:
|
||||
- "foo.example.com"
|
||||
filters:
|
||||
- type: RequestHeaderModifier
|
||||
requestHeaderModifier:
|
||||
set:
|
||||
- name: my-header-name
|
||||
value: my-new-header-value
|
||||
matches:
|
||||
- path:
|
||||
type: PathPrefix
|
||||
value: /foo/
|
||||
@ -0,0 +1,3 @@
|
||||
crds:
|
||||
upgradeJob:
|
||||
enabled: true
|
||||
46
charts/kube-prometheus-stack/ci/lint.sh
Normal file
46
charts/kube-prometheus-stack/ci/lint.sh
Normal file
@ -0,0 +1,46 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
{
|
||||
SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)
|
||||
|
||||
cd "${SCRIPT_DIR}/../"
|
||||
|
||||
./hack/update_crds.sh
|
||||
if ! git diff "$GITHUB_SHA" --color=always --exit-code; then
|
||||
echo "Please run ./hack/update_crds.sh"
|
||||
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd hack
|
||||
|
||||
export PIP_DISABLE_PIP_VERSION_CHECK=1
|
||||
|
||||
python3 -m venv venv
|
||||
# shellcheck disable=SC1091
|
||||
source venv/bin/activate
|
||||
|
||||
pip3 install -r requirements.txt
|
||||
|
||||
go install -a github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest
|
||||
PATH="$(go env GOPATH)/bin:$PATH"
|
||||
export PATH
|
||||
|
||||
./sync_prometheus_rules.py
|
||||
if ! git diff "$GITHUB_SHA" --color=always --exit-code; then
|
||||
echo "Changes inside rules are not supported!"
|
||||
echo "Please go into the ./hack/ directory and run ./sync_prometheus_rules.py"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
./sync_grafana_dashboards.py
|
||||
if ! git diff "$GITHUB_SHA" --color=always --exit-code; then
|
||||
echo "Changes inside dashboards are not supported!"
|
||||
echo "Please go into the ./hack/ directory and run ./sync_grafana_dashboards.py"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -rf ./venv ./*.git
|
||||
} 2>&1
|
||||
1566
charts/kube-prometheus-stack/files/dashboards/k8s-coredns.json
Normal file
1566
charts/kube-prometheus-stack/files/dashboards/k8s-coredns.json
Normal file
File diff suppressed because it is too large
Load Diff
72
charts/kube-prometheus-stack/hack/README.md
Normal file
72
charts/kube-prometheus-stack/hack/README.md
Normal file
@ -0,0 +1,72 @@
|
||||
# kube-prometheus-stack hacks
|
||||
|
||||
## [update_mixins.sh](update_mixins.sh)
|
||||
|
||||
This script is a useful wrapper to run `sync_prometheus_rules.py` and
|
||||
`sync_grafana_dashboards.py`.
|
||||
|
||||
It clones all dependency dashboards into a tmp folder.
|
||||
|
||||
And it lets you know if you are missing commandline-tools necessary for the
|
||||
update to complete.
|
||||
|
||||
Therefore, if you want to create a PR that updates the mixins, please
|
||||
run `./hack/update_mixins.sh` from the charts directory
|
||||
(`./charts/kube-prometheus-stack`).
|
||||
|
||||
## [sync_prometheus_rules.py](sync_prometheus_rules.py)
|
||||
|
||||
This script generates prometheus rules set for alertmanager from any properly formatted kubernetes YAML based on defined input, splitting rules to separate files based on group name.
|
||||
|
||||
Currently following imported:
|
||||
|
||||
- [prometheus-operator/kube-prometheus rules set](https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/kubernetesControlPlane-prometheusRule.yaml)
|
||||
- In order to modify these rules:
|
||||
- prepare and merge PR into [kubernetes-mixin](https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/rules) master and/or release branch
|
||||
- run import inside your fork of [prometheus-operator/kube-prometheus](https://github.com/prometheus-operator/kube-prometheus/tree/main)
|
||||
|
||||
```bash
|
||||
jb update
|
||||
make generate
|
||||
```
|
||||
|
||||
- prepare and merge PR with imported changes into `prometheus-operator/kube-prometheus` master and/or release branch
|
||||
- run sync_prometheus_rules.py inside your fork of this repository
|
||||
- send PR with changes to this repository
|
||||
- [etcd-io/etcd rules set](https://github.com/etcd-io/etcd/blob/main/contrib/mixin/mixin.libsonnet).
|
||||
- In order to modify these rules:
|
||||
- prepare and merge PR into [etcd-io/etcd](https://github.com/etcd-io/etcd/blob/main/contrib/mixin/mixin.libsonnet) repository
|
||||
- run sync_prometheus_rules.py inside your fork of this repository
|
||||
- send PR with changes to this repository
|
||||
|
||||
## [sync_grafana_dashboards.py](sync_grafana_dashboards.py)
|
||||
|
||||
This script generates grafana dashboards from json files, splitting them to separate files based on group name.
|
||||
|
||||
Currently following imported:
|
||||
|
||||
- [prometheus-operator/kube-prometheus dashboards](https://github.com/prometheus-operator/kube-prometheus/tree/main/manifests/grafana-deployment.yaml)
|
||||
- In order to modify these dashboards:
|
||||
- prepare and merge PR into [kubernetes-mixin](https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/dashboards) master and/or release branch
|
||||
- run import inside your fork of [prometheus-operator/kube-prometheus](https://github.com/prometheus-operator/kube-prometheus/tree/main)
|
||||
|
||||
```bash
|
||||
jb update
|
||||
make generate
|
||||
```
|
||||
|
||||
- prepare and merge PR with imported changes into `prometheus-operator/kube-prometheus` master and/or release branch
|
||||
- run sync_grafana_dashboards.py inside your fork of this repository
|
||||
- send PR with changes to this repository
|
||||
|
||||
<!-- textlint-disable -->
|
||||
|
||||
- [etcd-io/website dashboard](https://github.com/etcd-io/etcd/blob/main/contrib/mixin/mixin.libsonnet)
|
||||
- In order to modify this dashboard:
|
||||
- prepare and merge PR into [etcd-io/etcd](https://github.com/etcd-io/etcd/blob/main/contrib/mixin/mixin.libsonnet) repository
|
||||
- run sync_grafana_dashboards.py inside your fork of this repository
|
||||
- send PR with changes to this repository
|
||||
|
||||
<!-- textlint-enable -->
|
||||
|
||||
[CoreDNS dashboard](https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/templates/grafana/dashboards-1.14/k8s-coredns.yaml) is the only dashboard which is maintained in this repository and can be changed without import.
|
||||
5
charts/kube-prometheus-stack/hack/minikube/README.md
Normal file
5
charts/kube-prometheus-stack/hack/minikube/README.md
Normal file
@ -0,0 +1,5 @@
|
||||
# Testing on Minikube
|
||||
|
||||
The configuration in this folder lets you locally test the setup on minikube. Use cmd.sh to set up components and hack a working etcd scrape configuration. Run the commands in the sequence listed in the script to get a local working minikube cluster.
|
||||
|
||||
If you're using windows, there's a commented-out section that you should add to the minikube command.
|
||||
82
charts/kube-prometheus-stack/hack/minikube/cmd.sh
Normal file
82
charts/kube-prometheus-stack/hack/minikube/cmd.sh
Normal file
@ -0,0 +1,82 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
HELM_RELEASE_NAME=prom-op
|
||||
CHART=./
|
||||
NAMESPACE=monitoring
|
||||
VALUES_FILES=./hack/minikube/values.yaml
|
||||
|
||||
if [ "$1" = "reset-minikube" ]; then
|
||||
minikube delete
|
||||
minikube start \
|
||||
#--vm-driver hyperv --hyperv-virtual-switch "Default Switch" \
|
||||
--kubernetes-version=v1.13.3 \
|
||||
--memory=4096 --bootstrapper=kubeadm \
|
||||
--extra-config=kubelet.authentication-token-webhook=true \
|
||||
--extra-config=kubelet.authorization-mode=Webhook \
|
||||
--extra-config=scheduler.address=0.0.0.0 \
|
||||
--extra-config=controller-manager.address=0.0.0.0
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$1" = "init-helm" ]; then
|
||||
helm init
|
||||
helm repo update
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$1" = "init-etcd-secret" ]; then
|
||||
kubectl create namespace monitoring
|
||||
kubectl delete secret etcd-certs -nmonitoring
|
||||
kubectl create secret generic etcd-certs -nmonitoring \
|
||||
--from-literal=ca.crt="$(kubectl exec kube-apiserver-minikube -nkube-system -- cat /var/lib/minikube/certs/etcd/ca.crt)" \
|
||||
--from-literal=client.crt="$(kubectl exec kube-apiserver-minikube -nkube-system -- cat /var/lib/minikube/certs/apiserver-etcd-client.crt)" \
|
||||
--from-literal=client.key="$(kubectl exec kube-apiserver-minikube -nkube-system -- cat /var/lib/minikube/certs/apiserver-etcd-client.key)"
|
||||
|
||||
exit 0
|
||||
fi
|
||||
|
||||
|
||||
if [ "$1" = "upgrade-install" ]; then
|
||||
helm upgrade $HELM_RELEASE_NAME $CHART \
|
||||
--namespace $NAMESPACE \
|
||||
--values $VALUES_FILES \
|
||||
--set grafana.podAnnotations.redeploy-hack="$(cat /proc/sys/kernel/random/uuid)" \
|
||||
--install --debug
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$1" = "port-forward" ]; then
|
||||
killall kubectl &>/dev/null
|
||||
kubectl port-forward service/prom-op-prometheus-operato-prometheus 9090 &>/dev/null &
|
||||
kubectl port-forward service/prom-op-prometheus-operato-alertmanager 9093 &>/dev/null &
|
||||
kubectl port-forward service/prom-op-grafana 3000:80 &>/dev/null &
|
||||
echo "Started port-forward commands"
|
||||
echo "localhost:9090 - prometheus"
|
||||
echo "localhost:9093 - alertmanager"
|
||||
echo "localhost:3000 - grafana"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
cat << EOF
|
||||
Usage:
|
||||
install.sh <COMMAND>
|
||||
|
||||
Commands:
|
||||
reset-minikube - resets minikube with values suitable for running prometheus operator
|
||||
the normal installation will not allow scraping of the kubelet,
|
||||
scheduler or controller-manager components
|
||||
init-helm - initialize helm and update repository so that we can install
|
||||
the kube-prometheus-stack chart. This has to be run only once after
|
||||
a minikube installation is done
|
||||
init-etcd-secret - pulls the certs used to access etcd from the api server and creates
|
||||
a secret in the monitoring namespace with them. The values files
|
||||
in the install command assume that this secret exists and is valid.
|
||||
If not, then prometheus will not start
|
||||
upgrade-install - install or upgrade the kube-prometheus-stack chart in the cluster
|
||||
port-forward - starts port-forwarding for prometheus, alertmanager, grafana
|
||||
localhost:9090 - prometheus
|
||||
localhost:9093 - alertmanager
|
||||
localhost:3000 - grafana
|
||||
EOF
|
||||
|
||||
exit 0
|
||||
9
charts/kube-prometheus-stack/hack/minikube/values.yaml
Normal file
9
charts/kube-prometheus-stack/hack/minikube/values.yaml
Normal file
@ -0,0 +1,9 @@
|
||||
prometheus:
|
||||
prometheusSpec:
|
||||
secrets: [etcd-certs]
|
||||
kubeEtcd:
|
||||
serviceMonitor:
|
||||
scheme: https
|
||||
caFile: /etc/prometheus/secrets/etcd-certs/ca.crt
|
||||
certFile: /etc/prometheus/secrets/etcd-certs/client.crt
|
||||
keyFile: /etc/prometheus/secrets/etcd-certs/client.key
|
||||
@ -0,0 +1,39 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR=$(cd -- "$(dirname -- "${0}")" &>/dev/null && pwd)
|
||||
|
||||
if ! which jb &>/dev/null; then
|
||||
if ! which go &>/dev/null; then
|
||||
echo "'jb' command not found"
|
||||
echo "Install jsonnet-bundler from https://github.com/jsonnet-bundler/jsonnet-bundler"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "'jb' command not found. Try to install it from github.com/jsonnet-bundler/jsonnet-bundler"
|
||||
|
||||
go install -a github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest
|
||||
PATH="$(go env GOPATH)/bin:$PATH"
|
||||
export PATH
|
||||
|
||||
if ! which jb &>/dev/null; then
|
||||
echo "'jb' command not found"
|
||||
echo "Install jsonnet-bundler from https://github.com/jsonnet-bundler/jsonnet-bundler"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
rm -rf "${SCRIPT_DIR}/tmp"
|
||||
mkdir "${SCRIPT_DIR}/tmp"
|
||||
|
||||
export PIP_DISABLE_PIP_VERSION_CHECK=1
|
||||
|
||||
python3 -m venv "${SCRIPT_DIR}/tmp/venv"
|
||||
# shellcheck disable=SC1091
|
||||
source "${SCRIPT_DIR}/tmp/venv/bin/activate"
|
||||
|
||||
pip3 install -r "${SCRIPT_DIR}/requirements.txt"
|
||||
|
||||
"${SCRIPT_DIR}/sync_grafana_dashboards.py"
|
||||
"${SCRIPT_DIR}/sync_prometheus_rules.py"
|
||||
4
charts/kube-prometheus-stack/hack/requirements.txt
Normal file
4
charts/kube-prometheus-stack/hack/requirements.txt
Normal file
@ -0,0 +1,4 @@
|
||||
PyYAML==6.0.2
|
||||
requests==2.32.3
|
||||
# jsonnet was not updated on pip since 04/2023 - see #5283
|
||||
jsonnet @ git+https://github.com/google/jsonnet@5a4e8e3
|
||||
398
charts/kube-prometheus-stack/hack/sync_grafana_dashboards.py
Normal file
398
charts/kube-prometheus-stack/hack/sync_grafana_dashboards.py
Normal file
@ -0,0 +1,398 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Fetch dashboards from provided urls into this chart."""
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import textwrap
|
||||
|
||||
import _jsonnet
|
||||
import requests
|
||||
import yaml
|
||||
from yaml.representer import SafeRepresenter
|
||||
|
||||
|
||||
# https://stackoverflow.com/a/20863889/961092
|
||||
class LiteralStr(str):
|
||||
pass
|
||||
|
||||
|
||||
def change_style(style, representer):
    """Wrap a PyYAML representer so every node it emits uses ``style``.

    Args:
        style: YAML scalar style character (e.g. '|' for literal blocks).
        representer: base representer callable ``(dumper, data) -> node``.

    Returns:
        A representer with the same signature whose nodes carry ``style``.
    """
    def styled(dumper, data):
        node = representer(dumper, data)
        node.style = style
        return node

    return styled
|
||||
|
||||
|
||||
refs = {
|
||||
# renovate: git-refs=https://github.com/prometheus-operator/kube-prometheus branch=main
|
||||
'ref.kube-prometheus': '1e4df581de8897f16108bc4881be26e2a98c02b8',
|
||||
# renovate: git-refs=https://github.com/kubernetes-monitoring/kubernetes-mixin branch=master
|
||||
'ref.kubernetes-mixin': '6c82d5abe587b4c1dda7f1b0013af7d81e84c9fe',
|
||||
# renovate: git-refs=https://github.com/etcd-io/etcd branch=main
|
||||
'ref.etcd': '592c195ae21e8d58b7e2fef355e7067499d70edd',
|
||||
}
|
||||
|
||||
# Source files list
|
||||
charts = [
|
||||
{
|
||||
'source': '../files/dashboards/k8s-coredns.json',
|
||||
'destination': '../templates/grafana/dashboards-1.14',
|
||||
'type': 'dashboard_json',
|
||||
'min_kubernetes': '1.14.0-0',
|
||||
'multicluster_key': '.Values.grafana.sidecar.dashboards.multicluster.global.enabled',
|
||||
},
|
||||
{
|
||||
'source': 'https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/%s/manifests/grafana-dashboardDefinitions.yaml' % (refs['ref.kube-prometheus'],),
|
||||
'destination': '../templates/grafana/dashboards-1.14',
|
||||
'type': 'yaml',
|
||||
'min_kubernetes': '1.14.0-0',
|
||||
'multicluster_key': '.Values.grafana.sidecar.dashboards.multicluster.global.enabled',
|
||||
},
|
||||
{
|
||||
'git': 'https://github.com/kubernetes-monitoring/kubernetes-mixin.git',
|
||||
'branch': refs['ref.kubernetes-mixin'],
|
||||
'content': "(import 'dashboards/windows.libsonnet') + (import 'config.libsonnet') + { _config+:: { windowsExporterSelector: 'job=\"windows-exporter\"', }}",
|
||||
'cwd': '.',
|
||||
'destination': '../templates/grafana/dashboards-1.14',
|
||||
'min_kubernetes': '1.14.0-0',
|
||||
'type': 'jsonnet_mixin',
|
||||
'mixin_vars': {},
|
||||
'multicluster_key': '.Values.grafana.sidecar.dashboards.multicluster.global.enabled',
|
||||
},
|
||||
{
|
||||
'git': 'https://github.com/etcd-io/etcd.git',
|
||||
'branch': refs['ref.etcd'],
|
||||
'source': 'mixin.libsonnet',
|
||||
'cwd': 'contrib/mixin',
|
||||
'destination': '../templates/grafana/dashboards-1.14',
|
||||
'min_kubernetes': '1.14.0-0',
|
||||
'type': 'jsonnet_mixin',
|
||||
'mixin_vars': {'_config+': {}},
|
||||
'multicluster_key': '(or .Values.grafana.sidecar.dashboards.multicluster.global.enabled .Values.grafana.sidecar.dashboards.multicluster.etcd.enabled)'
|
||||
},
|
||||
]
|
||||
|
||||
# Additional conditions map
|
||||
condition_map = {
|
||||
'alertmanager-overview': ' (or .Values.alertmanager.enabled .Values.alertmanager.forceDeployDashboards)',
|
||||
'grafana-coredns-k8s': ' .Values.coreDns.enabled',
|
||||
'etcd': ' .Values.kubeEtcd.enabled',
|
||||
'apiserver': ' .Values.kubeApiServer.enabled',
|
||||
'controller-manager': ' .Values.kubeControllerManager.enabled',
|
||||
'kubelet': ' .Values.kubelet.enabled',
|
||||
'proxy': ' .Values.kubeProxy.enabled',
|
||||
'scheduler': ' .Values.kubeScheduler.enabled',
|
||||
'node-rsrc-use': ' (or .Values.nodeExporter.enabled .Values.nodeExporter.forceDeployDashboards)',
|
||||
'node-cluster-rsrc-use': ' (or .Values.nodeExporter.enabled .Values.nodeExporter.forceDeployDashboards)',
|
||||
'nodes': ' (and (or .Values.nodeExporter.enabled .Values.nodeExporter.forceDeployDashboards) .Values.nodeExporter.operatingSystems.linux.enabled)',
|
||||
'nodes-aix': ' (and (or .Values.nodeExporter.enabled .Values.nodeExporter.forceDeployDashboards) .Values.nodeExporter.operatingSystems.aix.enabled)',
|
||||
'nodes-darwin': ' (and (or .Values.nodeExporter.enabled .Values.nodeExporter.forceDeployDashboards) .Values.nodeExporter.operatingSystems.darwin.enabled)',
|
||||
'prometheus-remote-write': ' .Values.prometheus.prometheusSpec.remoteWriteDashboards',
|
||||
'k8s-coredns': ' .Values.coreDns.enabled',
|
||||
'k8s-windows-cluster-rsrc-use': ' .Values.windowsMonitoring.enabled',
|
||||
'k8s-windows-node-rsrc-use': ' .Values.windowsMonitoring.enabled',
|
||||
'k8s-resources-windows-cluster': ' .Values.windowsMonitoring.enabled',
|
||||
'k8s-resources-windows-namespace': ' .Values.windowsMonitoring.enabled',
|
||||
'k8s-resources-windows-pod': ' .Values.windowsMonitoring.enabled',
|
||||
}
|
||||
|
||||
replacement_map = {
|
||||
'var-namespace=$__cell_1': {
|
||||
'replacement': 'var-namespace=`}}{{ if .Values.grafana.sidecar.dashboards.enableNewTablePanelSyntax }}${__data.fields.namespace}{{ else }}$__cell_1{{ end }}{{`',
|
||||
},
|
||||
'var-type=$__cell_2': {
|
||||
'replacement': 'var-type=`}}{{ if .Values.grafana.sidecar.dashboards.enableNewTablePanelSyntax }}${__data.fields.workload_type}{{ else }}$__cell_2{{ end }}{{`',
|
||||
},
|
||||
'=$__cell': {
|
||||
'replacement': '=`}}{{ if .Values.grafana.sidecar.dashboards.enableNewTablePanelSyntax }}${__value.text}{{ else }}$__cell{{ end }}{{`',
|
||||
},
|
||||
'job=\\"prometheus-k8s\\",namespace=\\"monitoring\\"': {
|
||||
'replacement': '',
|
||||
},
|
||||
}
|
||||
|
||||
# standard header
|
||||
header = '''{{- /*
|
||||
Generated from '%(name)s' from %(url)s
|
||||
Do not change in-place! In order to change this file first read following link:
|
||||
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
|
||||
*/ -}}
|
||||
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
|
||||
{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=%(min_kubernetes)s" $kubeTargetVersion) (semverCompare "<%(max_kubernetes)s" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled%(condition)s }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
namespace: {{ template "kube-prometheus-stack-grafana.namespace" . }}
|
||||
name: {{ printf "%%s-%%s" (include "kube-prometheus-stack.fullname" $) "%(name)s" | trunc 63 | trimSuffix "-" }}
|
||||
annotations:
|
||||
{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
|
||||
labels:
|
||||
{{- if $.Values.grafana.sidecar.dashboards.label }}
|
||||
{{ $.Values.grafana.sidecar.dashboards.label }}: {{ ternary $.Values.grafana.sidecar.dashboards.labelValue "1" (not (empty $.Values.grafana.sidecar.dashboards.labelValue)) | quote }}
|
||||
{{- end }}
|
||||
app: {{ template "kube-prometheus-stack.name" $ }}-grafana
|
||||
{{ include "kube-prometheus-stack.labels" $ | indent 4 }}
|
||||
data:
|
||||
'''
|
||||
|
||||
# Add GrafanaDashboard custom resource
|
||||
grafana_dashboard_operator = """
|
||||
---
|
||||
{{- if and .Values.grafana.operator.dashboardsConfigMapRefEnabled (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=%(min_kubernetes)s" $kubeTargetVersion) (semverCompare "<%(max_kubernetes)s" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled%(condition)s }}
|
||||
apiVersion: grafana.integreatly.org/v1beta1
|
||||
kind: GrafanaDashboard
|
||||
metadata:
|
||||
name: {{ printf "%%s-%%s" (include "kube-prometheus-stack.fullname" $) "%(name)s" | trunc 63 | trimSuffix "-" }}
|
||||
namespace: {{ template "kube-prometheus-stack-grafana.namespace" . }}
|
||||
{{ with .Values.grafana.operator.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{ end }}
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" $ }}-grafana
|
||||
spec:
|
||||
allowCrossNamespaceImport: true
|
||||
resyncPeriod: {{ .Values.grafana.operator.resyncPeriod | quote | default "10m" }}
|
||||
folder: {{ .Values.grafana.operator.folder | quote }}
|
||||
instanceSelector:
|
||||
matchLabels:
|
||||
{{- if .Values.grafana.operator.matchLabels }}
|
||||
{{- toYaml .Values.grafana.operator.matchLabels | nindent 6 }}
|
||||
{{- else }}
|
||||
{{- fail "grafana.operator.matchLabels must be specified when grafana.operator.dashboardsConfigMapRefEnabled is true" }}
|
||||
{{- end }}
|
||||
configMapRef:
|
||||
name: {{ printf "%%s-%%s" (include "kube-prometheus-stack.fullname" $) "%(name)s" | trunc 63 | trimSuffix "-" }}
|
||||
key: %(name)s.json
|
||||
{{- end }}
|
||||
"""
|
||||
|
||||
def init_yaml_styles():
    """Register a PyYAML representer so LiteralStr values are dumped in
    literal block style ('|'), keeping embedded newlines readable."""
    represent_literal_str = change_style('|', SafeRepresenter.represent_str)
    yaml.add_representer(LiteralStr, represent_literal_str)
|
||||
|
||||
|
||||
def yaml_str_repr(struct, indent=2):
    """Serialize ``struct`` to a YAML string indented by ``indent`` spaces.

    Line wrapping and flow style are disabled so every item stays on its
    own line, which keeps the generated templates diff-friendly.
    """
    dumped = yaml.dump(
        struct,
        width=1000,  # effectively disable line wrapping
        default_flow_style=False,  # one item per line
    )
    return textwrap.indent(dumped, ' ' * indent)
|
||||
|
||||
|
||||
def replace_nested_key(data, key, value, replace):
    """Recursively walk ``data`` and substitute ``replace`` for every
    mapping entry whose key is ``key`` and whose value equals ``value``.

    Dicts and lists are rebuilt rather than mutated; any other value is
    returned unchanged.
    """
    if isinstance(data, list):
        return [replace_nested_key(item, key, value, replace) for item in data]
    if isinstance(data, dict):
        rebuilt = {}
        for k, v in data.items():
            if k == key and v == value:
                rebuilt[k] = replace
            else:
                rebuilt[k] = replace_nested_key(v, key, value, replace)
        return rebuilt
    return data
|
||||
|
||||
|
||||
def patch_dashboards_json(content, multicluster_key):
    """Patch a raw dashboard JSON string for embedding in a Helm template.

    Performs three transformations:
    - forces the 'cluster' template variable to match all clusters and
      marks its 'hide' field with a ':multicluster:' placeholder that is
      later swapped for a Helm conditional on ``multicluster_key``
    - replaces decimals=-1 with a JSON null
    - applies the module-level ``replacement_map`` substitutions

    On malformed JSON or missing templating keys (ValueError/KeyError)
    the content is returned as-is — possibly partially patched, since the
    exception aborts mid-sequence.  The result is always wrapped in
    ``{{` ... `}}`` so Helm treats the dashboard body literally.
    """
    try:
        content_struct = json.loads(content)

        # multicluster
        overwrite_list = []
        for variable in content_struct['templating']['list']:
            if variable['name'] == 'cluster':
                variable['allValue'] = '.*'
                # placeholder; replaced with a Helm if/else after dumping
                variable['hide'] = ':multicluster:'
            overwrite_list.append(variable)
        content_struct['templating']['list'] = overwrite_list

        # Replace decimals=-1 with decimals= (nil value)
        # ref: https://github.com/kubernetes-monitoring/kubernetes-mixin/pull/859
        content_struct = replace_nested_key(content_struct, "decimals", -1, None)

        # Compact dump, then swap the placeholder for a Helm conditional.
        # NOTE(review): emits hide=0 when multicluster, 2 otherwise —
        # presumably Grafana's show/hide levels; confirm against Grafana docs.
        content = json.dumps(content_struct, separators=(',', ':'))
        content = content.replace('":multicluster:"', '`}}{{ if %s }}0{{ else }}2{{ end }}{{`' % multicluster_key,)

        # Literal-string substitutions from the module-level map.
        for line in replacement_map:
            content = content.replace(line, replacement_map[line]['replacement'])
    except (ValueError, KeyError):
        pass

    return "{{`" + content + "`}}"
|
||||
|
||||
|
||||
def patch_json_set_timezone_as_variable(content):
    """Rewrite every "timezone" JSON field so its value comes from the
    Helm value grafana.defaultDashboardsTimezone at render time.

    ``content`` already contains Helm markup, so it is no longer valid
    JSON — a regex substitution is used instead of a JSON round-trip.
    """
    pattern = r'"timezone"\s*:\s*"(?:\\.|[^\"])*"'
    replacement = '"timezone": "`}}{{ .Values.grafana.defaultDashboardsTimezone }}{{`"'
    return re.sub(pattern, replacement, content, flags=re.IGNORECASE)
|
||||
|
||||
|
||||
def patch_json_set_editable_as_variable(content):
    """Rewrite every "editable" JSON field so its boolean comes from the
    Helm value grafana.defaultDashboardsEditable at render time.

    ``content`` already contains Helm markup, so it is no longer valid
    JSON — a regex substitution is used instead of a JSON round-trip.
    """
    pattern = r'"editable"\s*:\s*(?:true|false)'
    replacement = '"editable":`}}{{ .Values.grafana.defaultDashboardsEditable }}{{`'
    return re.sub(pattern, replacement, content, flags=re.IGNORECASE)
|
||||
|
||||
|
||||
def patch_json_set_interval_as_variable(content):
    """Rewrite every "interval" JSON field so its value comes from the
    Helm value grafana.defaultDashboardsInterval at render time.

    ``content`` already contains Helm markup, so it is no longer valid
    JSON — a regex substitution is used instead of a JSON round-trip.
    """
    pattern = r'"interval"\s*:\s*"(?:\\.|[^\"])*"'
    replacement = '"interval":"`}}{{ .Values.grafana.defaultDashboardsInterval }}{{`"'
    return re.sub(pattern, replacement, content, flags=re.IGNORECASE)
|
||||
|
||||
def jsonnet_import_callback(base, rel):
    """Resolve jsonnet ``import`` paths, redirecting github.com imports
    into the local ``vendor/`` directory populated by jsonnet-bundler.

    Args:
        base: directory of the importing file.
        rel: the import path as written in the jsonnet source.

    Returns:
        Tuple of (resolved path, file contents as UTF-8 bytes).

    Raises:
        RuntimeError: if the resolved file does not exist.
    """
    # rel_base is the path relative to the current cwd.
    # see https://github.com/prometheus-community/helm-charts/issues/5283
    # for more details.
    rel_base = base
    if rel_base.startswith(os.getcwd()):
        rel_base = base[len(os.getcwd()):]

    # Any reference to github.com is served from the vendored checkout.
    if "github.com" in rel:
        base = os.getcwd() + '/vendor/'
    elif "github.com" in rel_base:
        base = os.getcwd() + '/vendor/' + rel_base[rel_base.find('github.com'):]

    path = base + rel
    if os.path.isfile(path):
        # Context manager closes the handle promptly (the original used
        # open(...).read(), leaking the handle until garbage collection).
        with open(path, encoding='utf-8') as f:
            return path, f.read().encode('utf-8')

    raise RuntimeError('File not found')
|
||||
|
||||
|
||||
def write_group_to_file(resource_name, content, url, destination, min_kubernetes, max_kubernetes, multicluster_key):
    """Render one dashboard into ``<destination>/<resource_name>.yaml``.

    The output file contains: the ConfigMap from the module-level
    ``header`` template, the patched dashboard JSON as a YAML literal
    block, a ``{{- end }}`` footer, and the GrafanaDashboard CR from
    ``grafana_dashboard_operator``.

    Args:
        resource_name: dashboard name; used for the file name, the k8s
            resource names and the ``condition_map`` lookup.
        content: raw dashboard JSON string.
        url: source URL recorded in the generated header comment.
        destination: output directory (created if missing).
        min_kubernetes: lower semver bound for the template guard.
        max_kubernetes: upper semver bound for the template guard.
        multicluster_key: Helm values expression controlling visibility
            of the 'cluster' template variable.
    """
    # initialize header
    lines = header % {
        'name': resource_name,
        'url': url,
        'condition': condition_map.get(resource_name, ''),
        'min_kubernetes': min_kubernetes,
        'max_kubernetes': max_kubernetes
    }

    # Patch the JSON, then template-ize timezone/editable/interval fields.
    content = patch_dashboards_json(content, multicluster_key)
    content = patch_json_set_timezone_as_variable(content)
    content = patch_json_set_editable_as_variable(content)
    content = patch_json_set_interval_as_variable(content)

    # LiteralStr forces block style ('|') when dumped (see init_yaml_styles).
    filename_struct = {resource_name + '.json': (LiteralStr(content))}
    # rules themselves
    lines += yaml_str_repr(filename_struct)

    # footer
    lines += '{{- end }}'

    # GrafanaDashboard CR referencing the ConfigMap above.
    lines_grafana_operator = grafana_dashboard_operator % {
        'name': resource_name,
        'condition': condition_map.get(resource_name, ''),
        'min_kubernetes': min_kubernetes,
        'max_kubernetes': max_kubernetes
    }

    lines += lines_grafana_operator

    filename = resource_name + '.yaml'
    new_filename = "%s/%s" % (destination, filename)

    # make sure directories to store the file exist
    os.makedirs(destination, exist_ok=True)

    # recreate the file
    with open(new_filename, 'w') as f:
        f.write(lines)

    print("Generated %s" % new_filename)
|
||||
|
||||
|
||||
def main():
    """Regenerate all Grafana dashboard templates listed in ``charts``.

    For each chart entry the dashboard source is obtained from one of:
    a shallow git checkout evaluated as a jsonnet mixin, an HTTP URL, or
    a local file; it is then rendered into Helm templates via
    write_group_to_file().

    NOTE(review): subprocess.run() is called without check=True, so git
    or jb failures are silently ignored — verify this is intentional.
    """
    # Run relative to this script's directory so relative paths in
    # ``charts`` resolve consistently.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    init_yaml_styles()
    # read the rules, create a new template file per group
    for chart in charts:
        if 'git' in chart:
            # Default mixin entry point when the chart supplies inline content.
            if 'source' not in chart:
                chart['source'] = '_mixin.jsonnet'

            url = chart['git']

            print("Clone %s" % chart['git'])
            checkout_dir = os.path.basename(chart['git'])
            shutil.rmtree(checkout_dir, ignore_errors=True)

            branch = "main"
            if 'branch' in chart:
                branch = chart['branch']

            # Shallow fetch of a single ref (branch name or commit sha).
            subprocess.run(["git", "init", "--initial-branch", "main", checkout_dir, "--quiet"])
            subprocess.run(["git", "-C", checkout_dir, "remote", "add", "origin", chart['git']])
            subprocess.run(["git", "-C", checkout_dir, "fetch", "--depth", "1", "origin", branch, "--quiet"])
            subprocess.run(["git", "-c", "advice.detachedHead=false", "-C", checkout_dir, "checkout", "FETCH_HEAD", "--quiet"])
            print("Generating rules from %s" % chart['source'])

            mixin_file = chart['source']
            mixin_dir = checkout_dir + '/' + chart['cwd'] + '/'
            # Vendor jsonnet dependencies when the mixin declares them.
            if os.path.exists(mixin_dir + "jsonnetfile.json"):
                print("Running jsonnet-bundler, because jsonnetfile.json exists")
                subprocess.run(["jb", "install"], cwd=mixin_dir)

            # Inline mixin content provided by the chart entry overrides
            # (or creates) the source file inside the checkout.
            if 'content' in chart:
                f = open(mixin_dir + mixin_file, "w")
                f.write(chart['content'])
                f.close()

            mixin_vars = json.dumps(chart['mixin_vars'])

            # Evaluate from inside the mixin dir so relative imports work;
            # cwd is restored after jsonnet evaluation below.
            cwd = os.getcwd()
            os.chdir(mixin_dir)
            raw_text = '((import "%s") + %s)' % (mixin_file, mixin_vars)
            source = os.path.basename(mixin_file)
        elif 'source' in chart and chart['source'].startswith('http'):
            print("Generating rules from %s" % chart['source'])
            response = requests.get(chart['source'])
            if response.status_code != 200:
                print('Skipping the file, response code %s not equals 200' % response.status_code)
                continue
            raw_text = response.text
            source = chart['source']
            url = chart['source']
        else:
            # Plain local file.
            with open(chart['source']) as f:
                raw_text = f.read()

            source = chart['source']
            url = chart['source']

        # Open-ended upper bound unless the chart pins one.
        if ('max_kubernetes' not in chart):
            chart['max_kubernetes']="9.9.9-9"

        if chart['type'] == 'yaml':
            # kube-prometheus style: list of ConfigMaps with dashboards in .data
            yaml_text = yaml.full_load(raw_text)
            groups = yaml_text['items']
            for group in groups:
                for resource, content in group['data'].items():
                    write_group_to_file(resource.replace('.json', ''), content, url, chart['destination'], chart['min_kubernetes'], chart['max_kubernetes'], chart['multicluster_key'])
        elif chart['type'] == 'jsonnet_mixin':
            json_text = json.loads(_jsonnet.evaluate_snippet(source, raw_text + '.grafanaDashboards', import_callback=jsonnet_import_callback))

            if 'git' in chart:
                os.chdir(cwd)
            # is it already a dashboard structure or is it nested (etcd case)?
            flat_structure = bool(json_text.get('annotations'))
            if flat_structure:
                resource = os.path.basename(chart['source']).replace('.json', '')
                write_group_to_file(resource, json.dumps(json_text, indent=4), url, chart['destination'], chart['min_kubernetes'], chart['max_kubernetes'], chart['multicluster_key'])
            else:
                for resource, content in json_text.items():
                    write_group_to_file(resource.replace('.json', ''), json.dumps(content, indent=4), url, chart['destination'], chart['min_kubernetes'], chart['max_kubernetes'], chart['multicluster_key'])
        elif chart['type'] == 'dashboard_json':
            write_group_to_file(os.path.basename(source).replace('.json', ''),
                                raw_text, url, chart['destination'], chart['min_kubernetes'],
                                chart['max_kubernetes'], chart['multicluster_key'])

    print("Finished")
|
||||
|
||||
|
||||
# Entry point: regenerate all dashboard templates when run directly.
if __name__ == '__main__':
    main()
|
||||
682
charts/kube-prometheus-stack/hack/sync_prometheus_rules.py
Normal file
682
charts/kube-prometheus-stack/hack/sync_prometheus_rules.py
Normal file
@ -0,0 +1,682 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Fetch alerting and aggregation rules from provided urls into this chart."""
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import textwrap
|
||||
|
||||
import _jsonnet
|
||||
import requests
|
||||
import yaml
|
||||
from yaml.representer import SafeRepresenter
|
||||
|
||||
|
||||
# https://stackoverflow.com/a/20863889/961092
|
||||
class LiteralStr(str):
|
||||
pass
|
||||
|
||||
|
||||
def change_style(style, representer):
    """Wrap a PyYAML representer so every node it emits uses ``style``.

    Args:
        style: YAML scalar style character (e.g. '|' for literal blocks).
        representer: base representer callable ``(dumper, data) -> node``.

    Returns:
        A representer with the same signature whose nodes carry ``style``.
    """
    def styled(dumper, data):
        node = representer(dumper, data)
        node.style = style
        return node

    return styled
|
||||
|
||||
|
||||
refs = {
|
||||
# renovate: git-refs=https://github.com/prometheus-operator/kube-prometheus branch=main
|
||||
'ref.kube-prometheus': '1e4df581de8897f16108bc4881be26e2a98c02b8',
|
||||
# renovate: git-refs=https://github.com/kubernetes-monitoring/kubernetes-mixin branch=master
|
||||
'ref.kubernetes-mixin': '6c82d5abe587b4c1dda7f1b0013af7d81e84c9fe',
|
||||
# renovate: git-refs=https://github.com/etcd-io/etcd branch=main
|
||||
'ref.etcd': '592c195ae21e8d58b7e2fef355e7067499d70edd',
|
||||
}
|
||||
|
||||
# Source files list
|
||||
charts = [
|
||||
{
|
||||
'git': 'https://github.com/prometheus-operator/kube-prometheus.git',
|
||||
'branch': refs['ref.kube-prometheus'],
|
||||
'source': 'main.libsonnet',
|
||||
'cwd': '',
|
||||
'destination': '../templates/prometheus/rules-1.14',
|
||||
'min_kubernetes': '1.14.0-0',
|
||||
'mixin': """
|
||||
local kp =
|
||||
(import 'jsonnet/kube-prometheus/main.libsonnet') + {
|
||||
values+:: {
|
||||
nodeExporter+: {
|
||||
mixin+: {
|
||||
_config+: {
|
||||
fsSelector: '$.Values.defaultRules.node.fsSelector',
|
||||
},
|
||||
},
|
||||
},
|
||||
common+: {
|
||||
namespace: 'monitoring',
|
||||
},
|
||||
kubernetesControlPlane+: {
|
||||
kubeProxy: true,
|
||||
},
|
||||
},
|
||||
grafana: {},
|
||||
};
|
||||
|
||||
{
|
||||
groups: std.flattenArrays([
|
||||
kp[component][resource].spec.groups
|
||||
for component in std.objectFields(kp)
|
||||
for resource in std.filter(
|
||||
function(resource)
|
||||
kp[component][resource].kind == 'PrometheusRule',
|
||||
std.objectFields(kp[component])
|
||||
)
|
||||
]),
|
||||
}
|
||||
"""
|
||||
},
|
||||
{
|
||||
'git': 'https://github.com/kubernetes-monitoring/kubernetes-mixin.git',
|
||||
'branch': refs['ref.kubernetes-mixin'],
|
||||
'source': 'windows.libsonnet',
|
||||
'cwd': 'rules',
|
||||
'destination': '../templates/prometheus/rules-1.14',
|
||||
'min_kubernetes': '1.14.0-0',
|
||||
'mixin': """
|
||||
local kp =
|
||||
{ prometheusAlerts+:: {}, prometheusRules+:: {}} +
|
||||
(import "windows.libsonnet") +
|
||||
{'_config': {
|
||||
'clusterLabel': 'cluster',
|
||||
'windowsExporterSelector': 'job="windows-exporter"',
|
||||
'kubeStateMetricsSelector': 'job="kube-state-metrics"',
|
||||
}};
|
||||
|
||||
kp.prometheusAlerts + kp.prometheusRules
|
||||
"""
|
||||
},
|
||||
{
|
||||
'git': 'https://github.com/etcd-io/etcd.git',
|
||||
'branch': refs['ref.etcd'],
|
||||
'source': 'mixin.libsonnet',
|
||||
'cwd': 'contrib/mixin',
|
||||
'destination': '../templates/prometheus/rules-1.14',
|
||||
'min_kubernetes': '1.14.0-0',
|
||||
# Override the default etcd_instance_labels to get proper aggregation for etcd instances in k8s clusters (#2720)
|
||||
# see https://github.com/etcd-io/etcd/blob/1c22e7b36bc5d8543f1646212f2960f9fe503b8c/contrib/mixin/config.libsonnet#L13
|
||||
'mixin': """
|
||||
local kp =
|
||||
{ prometheusAlerts+:: {}, prometheusRules+:: {}} +
|
||||
(import "mixin.libsonnet") +
|
||||
{'_config': {
|
||||
'etcd_selector': 'job=~".*etcd.*"',
|
||||
'etcd_instance_labels': 'instance, pod',
|
||||
'scrape_interval_seconds': 30,
|
||||
'clusterLabel': 'job',
|
||||
}};
|
||||
|
||||
kp.prometheusAlerts + kp.prometheusRules
|
||||
"""
|
||||
},
|
||||
]
|
||||
|
||||
# Additional conditions map
|
||||
condition_map = {
|
||||
'alertmanager.rules': ' .Values.defaultRules.rules.alertmanager',
|
||||
'config-reloaders': ' .Values.defaultRules.rules.configReloaders',
|
||||
'etcd': ' .Values.kubeEtcd.enabled .Values.defaultRules.rules.etcd',
|
||||
'general.rules': ' .Values.defaultRules.rules.general',
|
||||
'k8s.rules.container_cpu_limits': ' .Values.defaultRules.rules.k8sContainerCpuLimits',
|
||||
'k8s.rules.container_cpu_requests': ' .Values.defaultRules.rules.k8sContainerCpuRequests',
|
||||
'k8s.rules.container_cpu_usage_seconds_total': ' .Values.defaultRules.rules.k8sContainerCpuUsageSecondsTotal',
|
||||
'k8s.rules.container_memory_cache': ' .Values.defaultRules.rules.k8sContainerMemoryCache',
|
||||
'k8s.rules.container_memory_limits': ' .Values.defaultRules.rules.k8sContainerMemoryLimits',
|
||||
'k8s.rules.container_memory_requests': ' .Values.defaultRules.rules.k8sContainerMemoryRequests',
|
||||
'k8s.rules.container_memory_rss': ' .Values.defaultRules.rules.k8sContainerMemoryRss',
|
||||
'k8s.rules.container_memory_swap': ' .Values.defaultRules.rules.k8sContainerMemorySwap',
|
||||
'k8s.rules.container_memory_working_set_bytes': ' .Values.defaultRules.rules.k8sContainerMemoryWorkingSetBytes',
|
||||
'k8s.rules.container_resource': ' .Values.defaultRules.rules.k8sContainerResource',
|
||||
'k8s.rules.pod_owner': ' .Values.defaultRules.rules.k8sPodOwner',
|
||||
'kube-apiserver-availability.rules': ' .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserverAvailability',
|
||||
'kube-apiserver-burnrate.rules': ' .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserverBurnrate',
|
||||
'kube-apiserver-histogram.rules': ' .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserverHistogram',
|
||||
'kube-apiserver-slos': ' .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserverSlos',
|
||||
'kube-prometheus-general.rules': ' .Values.defaultRules.rules.kubePrometheusGeneral',
|
||||
'kube-prometheus-node-recording.rules': ' .Values.defaultRules.rules.kubePrometheusNodeRecording',
|
||||
'kube-scheduler.rules': ' .Values.kubeScheduler.enabled .Values.defaultRules.rules.kubeSchedulerRecording',
|
||||
'kube-state-metrics': ' .Values.defaultRules.rules.kubeStateMetrics',
|
||||
'kubelet.rules': ' .Values.kubelet.enabled .Values.defaultRules.rules.kubelet',
|
||||
'kubernetes-apps': ' .Values.defaultRules.rules.kubernetesApps',
|
||||
'kubernetes-resources': ' .Values.defaultRules.rules.kubernetesResources',
|
||||
'kubernetes-storage': ' .Values.defaultRules.rules.kubernetesStorage',
|
||||
'kubernetes-system': ' .Values.defaultRules.rules.kubernetesSystem',
|
||||
'kubernetes-system-kube-proxy': ' .Values.kubeProxy.enabled .Values.defaultRules.rules.kubeProxy',
|
||||
'kubernetes-system-apiserver': ' .Values.defaultRules.rules.kubernetesSystem', # kubernetes-system was split into more groups in 1.14, one of them is kubernetes-system-apiserver
|
||||
'kubernetes-system-kubelet': ' .Values.defaultRules.rules.kubernetesSystem', # kubernetes-system was split into more groups in 1.14, one of them is kubernetes-system-kubelet
|
||||
'kubernetes-system-controller-manager': ' .Values.kubeControllerManager.enabled .Values.defaultRules.rules.kubeControllerManager',
|
||||
'kubernetes-system-scheduler': ' .Values.kubeScheduler.enabled .Values.defaultRules.rules.kubeSchedulerAlerting',
|
||||
'node-exporter.rules': ' .Values.defaultRules.rules.nodeExporterRecording',
|
||||
'node-exporter': ' .Values.defaultRules.rules.nodeExporterAlerting',
|
||||
'node.rules': ' .Values.defaultRules.rules.node',
|
||||
'node-network': ' .Values.defaultRules.rules.network',
|
||||
'prometheus-operator': ' .Values.defaultRules.rules.prometheusOperator',
|
||||
'prometheus': ' .Values.defaultRules.rules.prometheus', # kube-prometheus >= 1.14 uses prometheus as group instead of prometheus.rules
|
||||
'windows.node.rules': ' .Values.windowsMonitoring.enabled .Values.defaultRules.rules.windows',
|
||||
'windows.pod.rules': ' .Values.windowsMonitoring.enabled .Values.defaultRules.rules.windows',
|
||||
}
|
||||
|
||||
alert_condition_map = {
|
||||
'AggregatedAPIDown': 'semverCompare ">=1.18.0-0" $kubeTargetVersion',
|
||||
'AlertmanagerDown': '.Values.alertmanager.enabled',
|
||||
'CoreDNSDown': '.Values.kubeDns.enabled',
|
||||
'KubeAPIDown': '.Values.kubeApiServer.enabled', # there are more alerts which are left enabled, because they'll never fire without metrics
|
||||
'KubeControllerManagerDown': '.Values.kubeControllerManager.enabled',
|
||||
'KubeletDown': '.Values.prometheusOperator.kubeletService.enabled', # there are more alerts which are left enabled, because they'll never fire without metrics
|
||||
'KubeSchedulerDown': '.Values.kubeScheduler.enabled',
|
||||
'KubeStateMetricsDown': '.Values.kubeStateMetrics.enabled', # there are more alerts which are left enabled, because they'll never fire without metrics
|
||||
'NodeExporterDown': '.Values.nodeExporter.enabled',
|
||||
'PrometheusOperatorDown': '.Values.prometheusOperator.enabled',
|
||||
}
|
||||
|
||||
replacement_map = {
|
||||
'job="prometheus-operator"': {
|
||||
'replacement': 'job="{{ $operatorJob }}"',
|
||||
'init': '{{- $operatorJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "operator" }}'},
|
||||
'job="prometheus-k8s"': {
|
||||
'replacement': 'job="{{ $prometheusJob }}"',
|
||||
'init': '{{- $prometheusJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "prometheus" }}'},
|
||||
'job="alertmanager-main"': {
|
||||
'replacement': 'job="{{ $alertmanagerJob }}"',
|
||||
'init': '{{- $alertmanagerJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "alertmanager" }}'},
|
||||
'namespace="monitoring"': {
|
||||
'replacement': 'namespace="{{ $namespace }}"',
|
||||
'init': '{{- $namespace := printf "%s" (include "kube-prometheus-stack.namespace" .) }}'},
|
||||
'alertmanager-$1': {
|
||||
'replacement': '$1',
|
||||
'init': ''},
|
||||
'job="kube-state-metrics"': {
|
||||
'replacement': 'job="{{ $kubeStateMetricsJob }}"',
|
||||
'init': '{{- $kubeStateMetricsJob := include "kube-prometheus-stack-kube-state-metrics.name" . }}'},
|
||||
'job="{{ $kubeStateMetricsJob }}"': {
|
||||
'replacement': 'job="{{ $kubeStateMetricsJob }}", namespace=~"{{ $targetNamespace }}"',
|
||||
'limitGroup': ['kubernetes-apps'],
|
||||
'init': '{{- $targetNamespace := .Values.defaultRules.appNamespacesTarget }}'},
|
||||
'job="kubelet"': {
|
||||
'replacement': 'job="kubelet", namespace=~"{{ $targetNamespace }}"',
|
||||
'limitGroup': ['kubernetes-storage'],
|
||||
'init': '{{- $targetNamespace := .Values.defaultRules.appNamespacesTarget }}'},
|
||||
'runbook_url: https://runbooks.prometheus-operator.dev/runbooks/': {
|
||||
'replacement': 'runbook_url: {{ .Values.defaultRules.runbookUrl }}/',
|
||||
'init': ''},
|
||||
'(namespace,service)': {
|
||||
'replacement': '(namespace,service,cluster)',
|
||||
'init': ''},
|
||||
'(namespace, job, handler': {
|
||||
'replacement': '(cluster, namespace, job, handler',
|
||||
'init': ''},
|
||||
'$.Values.defaultRules.node.fsSelector': {
|
||||
'replacement': '{{ $.Values.defaultRules.node.fsSelector }}',
|
||||
'init': ''},
|
||||
}
|
||||
|
||||
# standard header
|
||||
header = '''{{- /*
|
||||
Generated from '%(name)s' group from %(url)s
|
||||
Do not change in-place! In order to change this file first read following link:
|
||||
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
|
||||
*/ -}}
|
||||
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
|
||||
{{- if and (semverCompare ">=%(min_kubernetes)s" $kubeTargetVersion) (semverCompare "<%(max_kubernetes)s" $kubeTargetVersion) .Values.defaultRules.create%(condition)s }}%(init_line)s
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: {{ printf "%%s-%%s" (include "kube-prometheus-stack.fullname" .) "%(name)s" | trunc 63 | trimSuffix "-" }}
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}
|
||||
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
|
||||
{{- if .Values.defaultRules.labels }}
|
||||
{{ toYaml .Values.defaultRules.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.defaultRules.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.defaultRules.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
groups:
|
||||
-'''
|
||||
|
||||
|
||||
def init_yaml_styles():
    """Register a PyYAML representer so LiteralStr values are dumped in
    literal block style ('|'), preserving embedded newlines in PromQL."""
    represent_literal_str = change_style('|', SafeRepresenter.represent_str)
    yaml.add_representer(LiteralStr, represent_literal_str)
|
||||
|
||||
|
||||
def escape(s):
    """Escape Helm/Go-template delimiters in ``s``.

    Every ``{{`` becomes ``{{`{{`}}`` and every ``}}`` becomes
    ``{{`}}`}}`` so text that looks like template markup survives Helm's
    rendering verbatim.
    """
    return re.sub(r'\{\{|\}\}', lambda m: '{{`' + m.group(0) + '`}}', s)
|
||||
|
||||
|
||||
def fix_expr(rules):
    """Normalize every rule's 'expr' in place.

    Trailing whitespace and line breaks (artifacts of the YAML import)
    are stripped; multiline expressions are wrapped in LiteralStr so
    they are dumped in literal style (|-).
    """
    for entry in rules:
        expr = entry['expr'].rstrip()
        entry['expr'] = LiteralStr(expr) if '\n' in expr else expr
|
||||
|
||||
|
||||
def yaml_str_repr(struct, indent=4):
    """Represent ``struct`` as an indented, Helm-escaped YAML string.

    Line wrapping and flow style are disabled so each item stays on its
    own line; {{ and }} are escaped for Helm via escape(); the slice at
    the end removes the extra indentation from only the first line so
    the text can be appended directly after the header's trailing '-'.
    """
    text = yaml.dump(
        struct,
        width=1000,  # to disable line wrapping
        default_flow_style=False  # to disable multiple items on single line
    )
    text = escape(text)  # escape {{ and }} for helm
    text = textwrap.indent(text, ' ' * indent)[indent - 1:]  # indent everything, and remove very first line extra indentation
    return text
|
||||
|
||||
|
||||
def get_rule_group_condition(group_name, value_key):
    """Derive a per-group Helm condition from a rules condition string.

    When ``group_name`` contains several ``.Values`` references, only
    the last one (the defaultRules reference) is kept; the
    ``defaultRules.rules`` segment is then rewritten to
    ``defaultRules.<value_key>``.

    Returns '' for an empty ``group_name``; otherwise the rewritten,
    whitespace-stripped condition.
    """
    if group_name == '':
        return ''

    condition = group_name
    if condition.count(".Values") > 1:
        condition = condition.split(' ')[-1]

    target = f"Values.defaultRules.{value_key}"
    return condition.replace('Values.defaultRules.rules', target).strip()
|
||||
|
||||
|
||||
def add_rules_conditions(rules, rules_map, indent=4):
    """Add if wrapper for rules, listed in rules_map

    Args:
        rules: rendered rule-group text as one string.
        rules_map: mapping of alert name -> Helm condition expression.
        indent: column at which '- alert:' lines start in ``rules``.

    Returns the modified string. Alerts not found in ``rules`` are skipped;
    every occurrence of a listed alert gets wrapped in
    '{{- if <condition> }}' ... '{{- end }}'.
    """
    rule_condition = '{{- if %s }}\n'
    for alert_name in rules_map:
        line_start = ' ' * indent + '- alert: '
        if line_start + alert_name in rules:
            rule_text = rule_condition % rules_map[alert_name]
            start = 0
            # to modify all alerts with same name
            while True:
                try:
                    # add if condition
                    index = rules.index(line_start + alert_name, start)
                    # advance past the text we are about to insert so the next
                    # search cannot match this same occurrence again
                    start = index + len(rule_text) + 1
                    rules = rules[:index] + rule_text + rules[index:]
                    # add end of if
                    try:
                        next_index = rules.index(line_start, index + len(rule_text) + 1)
                    except ValueError:
                        # we found the last alert in file if there are no alerts after it
                        next_index = len(rules)

                    # depending on the rule ordering in rules_map it's possible that an if statement from another rule is present at the end of this block.
                    found_block_end = False
                    last_line_index = next_index
                    while not found_block_end:
                        last_line_index = rules.rindex('\n', index, last_line_index - 1) # find the starting position of the last line
                        last_line = rules[last_line_index + 1:next_index]

                        if last_line.startswith('{{- if'):
                            next_index = last_line_index + 1 # move next_index back if the current block ends in an if statement
                            continue

                        found_block_end = True
                    rules = rules[:next_index] + '{{- end }}\n' + rules[next_index:]
                except ValueError:
                    # no further occurrence of this alert — move to the next one
                    break
    return rules
|
||||
|
||||
|
||||
def add_rules_conditions_from_condition_map(rules, indent=4):
    """Wrap every alert listed in the module-level alert_condition_map in
    its Helm if/end guard."""
    return add_rules_conditions(rules, alert_condition_map, indent)
|
||||
|
||||
|
||||
def add_rules_per_rule_conditions(rules, group, indent=4):
    """Wrap each alert of ``group`` in a guard honouring
    .Values.defaultRules.disabled.<AlertName> (default false)."""
    condition_by_alert = {
        rule['alert']: f"not (.Values.defaultRules.disabled.{rule['alert']} | default false)"
        for rule in group['rules']
        if 'alert' in rule
    }
    return add_rules_conditions(rules, condition_by_alert, indent)
|
||||
|
||||
|
||||
def add_custom_labels(rules_str, group, indent=4, label_indent=2):
    """Add if wrapper for additional rules labels

    Splits the rendered group text into individual rules, then for each rule
    injects a conditional block that merges .Values.defaultRules.additionalRuleLabels
    and this group's additionalRuleGroupLabels into the rule's `labels:` section
    (creating the section when absent).
    """
    # Helm expression guarding this group's extra labels ('' when the group
    # has no entry in condition_map).
    rule_group_labels = get_rule_group_condition(condition_map.get(group['name'], ''), 'additionalRuleGroupLabels')

    additional_rule_labels = textwrap.indent("""
{{- with .Values.defaultRules.additionalRuleLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with %s }}
{{- toYaml . | nindent 8 }}
{{- end }}""" % (rule_group_labels,), " " * (indent + label_indent * 2))

    additional_rule_labels_condition_start = "\n" + " " * (indent + label_indent) + '{{- if or .Values.defaultRules.additionalRuleLabels %s }}' % (rule_group_labels,)
    additional_rule_labels_condition_end = "\n" + " " * (indent + label_indent) + '{{- end }}'
    # labels: cannot be null, if a rule does not have any labels by default, the labels block
    # should only be added if there are .Values defaultRules.additionalRuleLabels defined
    rule_seperator = "\n" + " " * indent + "-.*"
    label_seperator = "\n" + " " * indent + "  labels:"
    section_seperator = "\n" + " " * indent + "  \\S"
    section_seperator_len = len(section_seperator)-1
    rules_positions = re.finditer(rule_seperator,rules_str)

    # fetch breakpoint between each set of rules
    ruleStartingLine = [(rule_position.start(),rule_position.end()) for rule_position in rules_positions]
    head = rules_str[:ruleStartingLine[0][0]]

    # construct array of rules so they can be handled individually
    rules = []
    # pylint: disable=E1136
    # See https://github.com/pylint-dev/pylint/issues/1498 for None Values
    previousRule = None
    for r in ruleStartingLine:
        if previousRule != None:
            rules.append(rules_str[previousRule[0]:r[0]])
        previousRule = r
    # last rule: the final trailing character of rules_str is dropped here and
    # re-added as the "\n" in the return statement below
    rules.append(rules_str[previousRule[0]:len(rules_str)-1])

    for i, rule in enumerate(rules):
        current_label = re.search(label_seperator,rule)
        if current_label:
            # `labels:` block exists
            # determine if there are any existing entries
            entries = re.search(section_seperator,rule[current_label.end():])
            if entries:
                # NOTE(review): entries_start is assigned but never used
                entries_start = current_label.end()
                # position just before the first existing label entry line
                entries_end = entries.end()+current_label.end()-section_seperator_len
                rules[i] = rule[:entries_end] + additional_rule_labels_condition_start + additional_rule_labels + additional_rule_labels_condition_end + rule[entries_end:]
            else:
                # `labels:` does not contain any entries
                # append template to label section
                rules[i] += additional_rule_labels_condition_start + additional_rule_labels + additional_rule_labels_condition_end
        else:
            # `labels:` block does not exist
            # create it and append template
            rules[i] += additional_rule_labels_condition_start + "\n" + " " * indent + "  labels:" + additional_rule_labels + additional_rule_labels_condition_end
    return head + "".join(rules) + "\n"
|
||||
|
||||
|
||||
def add_custom_annotations(rules, group, indent=4):
    """Add if wrapper for additional rules annotations

    Inserts, directly after each alert's `annotations:` line, conditional
    blocks for .Values.defaultRules.additionalRuleAnnotations and this
    group's additionalRuleGroupAnnotations. Relies on every alert having an
    `annotations:` line at a fixed offset after its `- alert:` line.
    """
    rule_condition = '{{- if .Values.defaultRules.additionalRuleAnnotations }}\n{{ toYaml .Values.defaultRules.additionalRuleAnnotations | indent 8 }}\n{{- end }}'
    rule_group_labels = get_rule_group_condition(condition_map.get(group['name'], ''), 'additionalRuleGroupAnnotations')
    rule_group_condition = '\n{{- if %s }}\n{{ toYaml %s | indent 8 }}\n{{- end }}' % (rule_group_labels, rule_group_labels)
    annotations = "      annotations:"
    annotations_len = len(annotations) + 1
    rule_condition_len = len(rule_condition) + 1
    rule_group_condition_len = len(rule_group_condition)

    separator = " " * indent + "- alert:.*"
    alerts_positions = re.finditer(separator,rules)
    alert = 0

    for alert_position in alerts_positions:
        # Add rule_condition after 'annotations:' statement
        # (positions from finditer refer to the ORIGINAL string, so earlier
        # insertions are compensated for with the per-alert offset term)
        index = alert_position.end() + annotations_len + (rule_condition_len + rule_group_condition_len) * alert
        rules = rules[:index] + "\n" + rule_condition + rule_group_condition + rules[index:]
        alert += 1

    return rules
|
||||
|
||||
|
||||
def add_custom_keep_firing_for(rules, indent=4):
    """Insert an optional keep_firing_for field after every 'for:' line.

    The inserted snippet only renders when .Values.defaultRules.keepFiringFor
    is set.
    """
    pad = " " * indent + "  "
    snippet = (
        pad + '{{- with .Values.defaultRules.keepFiringFor }}\n'
        + pad + 'keep_firing_for: "{{ . }}"\n'
        + pad + '{{- end }}'
    )
    # Match positions refer to the original string; each prior insertion
    # shifts later positions by len(snippet) + 1 (the leading newline).
    shift = len(snippet) + 1
    pattern = " " * indent + "  for:.*"

    for n, match in enumerate(re.finditer(pattern, rules)):
        insert_at = match.end() + shift * n
        rules = rules[:insert_at] + "\n" + snippet + rules[insert_at:]

    return rules
|
||||
|
||||
|
||||
def add_custom_for(rules, indent=4):
    """Add custom 'for:' condition in rules"""
    return add_custom_alert_rules(rules, "for:", indent)
|
||||
|
||||
|
||||
def add_custom_severity(rules, indent=4):
    """Add custom 'severity:' condition in rules"""
    return add_custom_alert_rules(rules, "severity:", indent)
|
||||
|
||||
|
||||
def add_custom_alert_rules(rules, key_to_replace, indent):
    """Extend alert field to allow custom values

    Character-by-character scan over ``rules``: remembers the most recent
    alert name seen after a '- alert:' marker, and rewrites the next
    '<indent><key_to_replace> <value>' line into a Helm `dig` lookup so
    .Values.customRules.<AlertName>.<key> can override <value>.

    Args:
        rules: rendered rule-group text.
        key_to_replace: field name including the colon, e.g. "for:".
        indent: number of spaces before the field line.
    """
    key_to_replace_indented = ' ' * indent + key_to_replace
    alertkey_field = '- alert:'
    found_alert_key = False
    alertname = None
    updated_rules = ''

    # pylint: disable=C0200
    i = 0
    while i < len(rules):
        if rules[i:i + len(alertkey_field)] == alertkey_field:
            found_alert_key = True
            # skip '- alert:' plus the following space, then consume the
            # alphanumeric alert name
            start_index_word_after = i + len(alertkey_field) + 1
            end_index_alertkey_field = start_index_word_after
            while end_index_alertkey_field < len(rules) and rules[end_index_alertkey_field].isalnum():
                end_index_alertkey_field += 1

            alertname = rules[start_index_word_after:end_index_alertkey_field]

        if found_alert_key:
            if rules[i:i + len(key_to_replace_indented)] == key_to_replace_indented:
                # only rewrite the first matching key after each alert
                found_alert_key = False
                start_index_key_value = i + len(key_to_replace_indented) + 1
                end_index_key_to_replace = start_index_key_value
                while end_index_key_to_replace < len(rules) and rules[end_index_key_to_replace].isalnum():
                    end_index_key_to_replace += 1

                word_after_key_to_replace = rules[start_index_key_value:end_index_key_to_replace]
                # key_to_replace[:-1] drops the trailing ':' for the dig path
                new_key = key_to_replace_indented + ' {{ dig "' + alertname + \
                    '" "' + key_to_replace[:-1] + '" "' + \
                    word_after_key_to_replace + '" .Values.customRules }}'
                updated_rules += new_key
                # jump past the original value; the character at this index is
                # still appended by the statement below
                i = end_index_key_to_replace

        updated_rules += rules[i]
        i += 1

    return updated_rules
|
||||
|
||||
|
||||
def write_group_to_file(group, url, destination, min_kubernetes, max_kubernetes):
    """Render one Prometheus rule group into a Helm template file.

    Args:
        group: dict with 'name' and 'rules' (the parsed rule group).
        url: source URL/repo the group came from (recorded in the header).
        destination: output directory for the generated template.
        min_kubernetes / max_kubernetes: version bounds substituted into the header.

    Side effects: creates ``destination`` if needed and (re)writes
    '<destination>/<group name>.yaml'.
    """
    fix_expr(group['rules'])
    group_name = group['name']

    # prepare rules string representation
    rules = yaml_str_repr(group)
    # add replacements of custom variables and include their initialisation in case it's needed
    init_line = ''
    for line in replacement_map:
        if group_name in replacement_map[line].get('limitGroup', [group_name]) and line in rules:
            rules = rules.replace(line, replacement_map[line]['replacement'])
            if replacement_map[line]['init']:
                init_line += '\n' + replacement_map[line]['init']
    # append per-alert rules
    rules = add_custom_labels(rules, group)
    rules = add_custom_annotations(rules, group)
    rules = add_custom_keep_firing_for(rules)
    rules = add_custom_for(rules)
    rules = add_custom_severity(rules)
    rules = add_rules_conditions_from_condition_map(rules)
    rules = add_rules_per_rule_conditions(rules, group)
    # initialize header
    lines = header % {
        'name': sanitize_name(group['name']),
        'url': url,
        'condition': condition_map.get(group['name'], ''),
        'init_line': init_line,
        'min_kubernetes': min_kubernetes,
        'max_kubernetes': max_kubernetes
    }

    # rules themselves
    # inject configurable extra aggregation labels into every by()/on() clause
    lines += re.sub(
        r'\s(by|on) ?\(',
        r' \1 ({{ range $.Values.defaultRules.additionalAggregationLabels }}{{ . }},{{ end }}',
        rules,
        flags=re.IGNORECASE
    )

    # footer
    lines += '{{- end }}'

    filename = group['name'] + '.yaml'
    new_filename = "%s/%s" % (destination, filename)

    # make sure directories to store the file exist
    os.makedirs(destination, exist_ok=True)

    # recreate the file
    with open(new_filename, 'w') as f:
        f.write(lines)

    print("Generated %s" % new_filename)
|
||||
|
||||
def write_rules_names_template():
    """Write templates/prometheus/_rules.tpl defining the "rules.names"
    named template, which lists the sanitized name of every rule group in
    condition_map. Overwrites the file unconditionally.
    """
    with open('../templates/prometheus/_rules.tpl', 'w') as f:
        # generated-file banner (the \n escape terminates the Helm comment line)
        f.write('''{{- /*
Generated file. Do not change in-place! In order to change this file first read following link:
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
*/ -}}\n''')
        f.write('{{- define "rules.names" }}\n')
        f.write('rules:\n')
        for rule in condition_map:
            f.write('  - "%s"\n' % sanitize_name(rule))
        f.write('{{- end }}')
|
||||
|
||||
def main():
    """Entry point: fetch every configured chart source, parse its alert
    groups, and regenerate one Helm template file per group plus the
    rules.names template."""
    # run relative to this script's directory so the relative output paths resolve
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    init_yaml_styles()
    # read the rules, create a new template file per group
    for chart in charts:
        if 'git' in chart:
            if 'source' not in chart:
                chart['source'] = '_mixin.jsonnet'

            url = chart['git']

            print("Clone %s" % chart['git'])
            checkout_dir = os.path.basename(chart['git'])
            shutil.rmtree(checkout_dir, ignore_errors=True)

            branch = "main"
            if 'branch' in chart:
                branch = chart['branch']

            # shallow fetch of a single branch instead of a full clone
            # NOTE(review): subprocess.run is called without check=True, so a
            # failed git step is silently ignored — confirm this is intended
            subprocess.run(["git", "init", "--initial-branch", "main", checkout_dir, "--quiet"])
            subprocess.run(["git", "-C", checkout_dir, "remote", "add", "origin", chart['git']])
            subprocess.run(["git", "-C", checkout_dir, "fetch", "--depth", "1", "origin", branch, "--quiet"])
            subprocess.run(["git", "-c", "advice.detachedHead=false", "-C", checkout_dir, "checkout", "FETCH_HEAD", "--quiet"])

            if chart.get('mixin'):
                cwd = os.getcwd()

                source_cwd = chart['cwd']
                mixin_file = chart['source']

                mixin_dir = cwd + '/' + checkout_dir + '/' + source_cwd + '/'
                if os.path.exists(mixin_dir + "jsonnetfile.json"):
                    print("Running jsonnet-bundler, because jsonnetfile.json exists")
                    subprocess.run(["jb", "install"], cwd=mixin_dir)

                if 'content' in chart:
                    # inline mixin content provided by the chart config
                    f = open(mixin_dir + mixin_file, "w")
                    f.write(chart['content'])
                    f.close()

                print("Generating rules from %s" % mixin_file)
                print("Change cwd to %s" % checkout_dir + '/' + source_cwd)
                os.chdir(mixin_dir)

                alerts = json.loads(_jsonnet.evaluate_snippet(mixin_file, chart['mixin'], import_callback=jsonnet_import_callback))

                os.chdir(cwd)
            else:
                with open(checkout_dir + '/' + chart['source'], "r") as f:
                    raw_text = f.read()

                alerts = yaml.full_load(raw_text)

        else:
            # plain HTTP source (either a mixin snippet or ready-made YAML)
            url = chart['source']
            print("Generating rules from %s" % url)
            # NOTE(review): requests.get without a timeout can hang forever —
            # consider requests.get(url, timeout=...)
            response = requests.get(url)
            if response.status_code != 200:
                print('Skipping the file, response code %s not equals 200' % response.status_code)
                continue
            raw_text = response.text
            if chart.get('mixin'):
                alerts = json.loads(_jsonnet.evaluate_snippet(url, raw_text + '.prometheusAlerts'))
            else:
                alerts = yaml.full_load(raw_text)

        if ('max_kubernetes' not in chart):
            chart['max_kubernetes']="9.9.9-9"

        # etcd workaround, their file don't have spec level
        groups = alerts['spec']['groups'] if alerts.get('spec') else alerts['groups']
        for group in groups:
            write_group_to_file(group, url, chart['destination'], chart['min_kubernetes'], chart['max_kubernetes'])

    # write rules.names named template
    write_rules_names_template()

    print("Finished")
|
||||
|
||||
|
||||
def sanitize_name(name):
    """Normalize a rule-group name for use in file names and templates.

    Replaces underscores with hyphens and lowercases the result. The
    original used re.sub('[_]', ...), which is needless regex machinery for
    a single-character substitution; str.replace is equivalent and simpler.
    """
    return name.replace('_', '-').lower()
|
||||
|
||||
|
||||
def jsonnet_import_callback(base, rel):
    """Resolve a jsonnet import for _jsonnet.evaluate_snippet.

    Vendored imports (paths containing 'github.com') are redirected into the
    local 'vendor/' tree populated by jsonnet-bundler.

    Args:
        base: directory of the importing file (absolute, with trailing '/').
        rel: the import path as written in the jsonnet source.

    Returns:
        (full_path, content_bytes) for the resolved file.

    Raises:
        RuntimeError: if the resolved path does not exist.
    """
    # rel_base is the path relative to the current cwd.
    # see https://github.com/prometheus-community/helm-charts/issues/5283
    # for more details.
    rel_base = base
    if rel_base.startswith(os.getcwd()):
        rel_base = base[len(os.getcwd()):]

    if "github.com" in rel:
        base = os.getcwd() + '/vendor/'
    elif "github.com" in rel_base:
        base = os.getcwd() + '/vendor/' + rel_base[rel_base.find('github.com'):]

    if os.path.isfile(base + rel):
        # Context manager closes the handle deterministically; the original
        # leaked the file object to the garbage collector.
        with open(base + rel) as f:
            return base + rel, f.read().encode('utf-8')

    raise RuntimeError('File not found')
|
||||
|
||||
|
||||
# Script entry point: regenerate all rule templates when run directly.
if __name__ == '__main__':
    main()
|
||||
47
charts/kube-prometheus-stack/hack/update_crds.sh
Normal file
47
charts/kube-prometheus-stack/hack/update_crds.sh
Normal file
@ -0,0 +1,47 @@
|
||||
#!/bin/bash
# Download the prometheus-operator CRD manifests matching the chart's
# appVersion into charts/crds/crds/, then bundle them into a single
# bzip2-compressed file at charts/crds/files/crds.bz2.

set -e

SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)

# BSD sed (macOS) has no \s escape, so strip the prefix differently per OS.
if [[ $(uname -s) = "Darwin" ]]; then
  VERSION="$(grep ^appVersion "${SCRIPT_DIR}/../Chart.yaml" | sed 's/appVersion: //g')"
else
  VERSION="$(grep ^appVersion "${SCRIPT_DIR}/../Chart.yaml" | sed 's/appVersion:\s//g')"
fi

# "<local destination file> : <upstream source file>" pairs.
FILES=(
  "crd-alertmanagerconfigs.yaml : monitoring.coreos.com_alertmanagerconfigs.yaml"
  "crd-alertmanagers.yaml : monitoring.coreos.com_alertmanagers.yaml"
  "crd-podmonitors.yaml : monitoring.coreos.com_podmonitors.yaml"
  "crd-probes.yaml : monitoring.coreos.com_probes.yaml"
  "crd-prometheusagents.yaml : monitoring.coreos.com_prometheusagents.yaml"
  "crd-prometheuses.yaml : monitoring.coreos.com_prometheuses.yaml"
  "crd-prometheusrules.yaml : monitoring.coreos.com_prometheusrules.yaml"
  "crd-scrapeconfigs.yaml : monitoring.coreos.com_scrapeconfigs.yaml"
  "crd-servicemonitors.yaml : monitoring.coreos.com_servicemonitors.yaml"
  "crd-thanosrulers.yaml : monitoring.coreos.com_thanosrulers.yaml"
)

for line in "${FILES[@]}"; do
  # split on ':' and trim surrounding whitespace with xargs
  DESTINATION=$(echo "${line%%:*}" | xargs)
  SOURCE=$(echo "${line##*:}" | xargs)

  URL="https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/$VERSION/example/prometheus-operator-crd/$SOURCE"

  echo -e "Downloading Prometheus Operator CRD with Version ${VERSION}:\n${URL}\n"

  # first line of each CRD file records where it came from
  echo "# ${URL}" >"${SCRIPT_DIR}/../charts/crds/crds/${DESTINATION}"

  if ! curl --silent --retry-all-errors --fail --location "${URL}" >>"${SCRIPT_DIR}/../charts/crds/crds/${DESTINATION}"; then
    echo -e "Failed to download ${URL}!"
    exit 1
  fi
done

# Concatenate all CRDs (separated by YAML document markers) and compress.
{
  for file in "${SCRIPT_DIR}/../charts/crds/crds/"crd*.yaml; do
    cat "${file}"
    echo "---"
  done
} | bzip2 --best --compress --keep --stdout - >"${SCRIPT_DIR}/../charts/crds/files/crds.bz2"
|
||||
44
charts/kube-prometheus-stack/hack/update_mixins.sh
Normal file
44
charts/kube-prometheus-stack/hack/update_mixins.sh
Normal file
@ -0,0 +1,44 @@
|
||||
#!/usr/bin/env bash
# Pin the sync scripts to the latest upstream mixin commits, then run them
# inside a throwaway virtualenv to regenerate dashboards and rules.

set -euo pipefail

# jsonnet-bundler is required by the sync scripts to vendor mixin deps
if ! which jb &>/dev/null; then
  echo "'jb' command not found
Install jsonnet-bundler from https://github.com/jsonnet-bundler/jsonnet-bundler"
  exit 1
fi

# GNU/BusyBox sed takes -i with no argument; BSD sed needs -i ''.
case $(sed --help 2>&1) in
  *BusyBox* | *GNU*) _sed_i() { sed -i "$@"; } ;;
  *) _sed_i() { sed -i '' "$@"; } ;;
esac

SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)

# clean up the scratch dir on any exit (success or failure)
trap 'rm -rf "${SCRIPT_DIR}/tmp"' EXIT

rm -rf "${SCRIPT_DIR}/tmp"
mkdir "${SCRIPT_DIR}/tmp"

git clone --depth 1 --quiet https://github.com/prometheus-operator/kube-prometheus.git "${SCRIPT_DIR}/tmp/kube-prometheus"
git clone --depth 1 --quiet https://github.com/kubernetes-monitoring/kubernetes-mixin.git "${SCRIPT_DIR}/tmp/kubernetes-mixin"
git clone --depth 1 --quiet https://github.com/etcd-io/etcd.git "${SCRIPT_DIR}/tmp/etcd"

# rewrite the 'ref.<repo>' pins in both sync scripts to each repo's HEAD SHA
for REPO_PATH in "${SCRIPT_DIR}/tmp/"*; do
  SHA=$(git -C "$REPO_PATH" log -1 --pretty=format:"%H")
  REPO_NAME=$(basename "$REPO_PATH")
  echo "Updating $REPO_NAME to $SHA"
  _sed_i -e "s/'ref.$REPO_NAME'.*:.*'.*'/'ref.$REPO_NAME': '$SHA'/" "${SCRIPT_DIR}/sync_grafana_dashboards.py"
  _sed_i -e "s/'ref.$REPO_NAME'.*:.*'.*'/'ref.$REPO_NAME': '$SHA'/" "${SCRIPT_DIR}/sync_prometheus_rules.py"
done

export PIP_DISABLE_PIP_VERSION_CHECK=1

# isolated virtualenv so the sync scripts' deps don't pollute the host
python3 -m venv "${SCRIPT_DIR}/tmp/venv"
# shellcheck disable=SC1091
source "${SCRIPT_DIR}/tmp/venv/bin/activate"

pip3 install -r "${SCRIPT_DIR}/requirements.txt"

"${SCRIPT_DIR}/sync_grafana_dashboards.py"
"${SCRIPT_DIR}/sync_prometheus_rules.py"
|
||||
13
charts/kube-prometheus-stack/templates/NOTES.txt
Normal file
13
charts/kube-prometheus-stack/templates/NOTES.txt
Normal file
@ -0,0 +1,13 @@
|
||||
{{ $.Chart.Name }} has been installed. Check its status by running:
|
||||
kubectl --namespace {{ template "kube-prometheus-stack.namespace" . }} get pods -l "release={{ $.Release.Name }}"
|
||||
|
||||
Get Grafana '{{ .Values.grafana.adminUser }}' user password by running:
|
||||
|
||||
kubectl --namespace {{ template "kube-prometheus-stack.namespace" . }} get secrets {{ $.Release.Name }}-grafana -o jsonpath="{.data.admin-password}" | base64 -d ; echo
|
||||
|
||||
Access Grafana local instance:
|
||||
|
||||
export POD_NAME=$(kubectl --namespace {{ template "kube-prometheus-stack.namespace" . }} get pod -l "app.kubernetes.io/name={{ default "grafana" .Values.grafana.name }},app.kubernetes.io/instance={{ $.Release.Name }}" -oname)
|
||||
kubectl --namespace {{ template "kube-prometheus-stack.namespace" . }} port-forward $POD_NAME 3000
|
||||
|
||||
Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
|
||||
356
charts/kube-prometheus-stack/templates/_helpers.tpl
Normal file
356
charts/kube-prometheus-stack/templates/_helpers.tpl
Normal file
@ -0,0 +1,356 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/* Expand the name of the chart. This is suffixed with -alertmanager, which means subtract 13 from longest 63 available */}}
|
||||
{{- define "kube-prometheus-stack.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 50 | trimSuffix "-" -}}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
The components in this chart create additional resources that expand the longest created name strings.
|
||||
The longest name that gets created adds and extra 37 characters, so truncation should be 63-35=26.
|
||||
*/}}
|
||||
{{- define "kube-prometheus-stack.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 26 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 26 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 26 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Fullname suffixed with -operator */}}
|
||||
{{/* Adding 9 to 26 truncation of kube-prometheus-stack.fullname */}}
|
||||
{{- define "kube-prometheus-stack.operator.fullname" -}}
|
||||
{{- if .Values.prometheusOperator.fullnameOverride -}}
|
||||
{{- .Values.prometheusOperator.fullnameOverride | trunc 35 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-operator" (include "kube-prometheus-stack.fullname" .) -}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/* Prometheus custom resource instance name */}}
|
||||
{{- define "kube-prometheus-stack.prometheus.crname" -}}
|
||||
{{- if .Values.cleanPrometheusOperatorObjectNames }}
|
||||
{{- include "kube-prometheus-stack.fullname" . }}
|
||||
{{- else }}
|
||||
{{- print (include "kube-prometheus-stack.fullname" .) "-prometheus" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/* Prometheus apiVersion for networkpolicy */}}
|
||||
{{- define "kube-prometheus-stack.prometheus.networkPolicy.apiVersion" -}}
|
||||
{{- print "networking.k8s.io/v1" -}}
|
||||
{{- end }}
|
||||
|
||||
{{/* Alertmanager custom resource instance name */}}
|
||||
{{- define "kube-prometheus-stack.alertmanager.crname" -}}
|
||||
{{- if .Values.cleanPrometheusOperatorObjectNames }}
|
||||
{{- include "kube-prometheus-stack.fullname" . }}
|
||||
{{- else }}
|
||||
{{- print (include "kube-prometheus-stack.fullname" .) "-alertmanager" -}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/* ThanosRuler custom resource instance name */}}
|
||||
{{/* Subtracting 1 from 26 truncation of kube-prometheus-stack.fullname */}}
|
||||
{{- define "kube-prometheus-stack.thanosRuler.crname" -}}
|
||||
{{- if .Values.cleanPrometheusOperatorObjectNames }}
|
||||
{{- include "kube-prometheus-stack.fullname" . }}
|
||||
{{- else }}
|
||||
{{- print (include "kube-prometheus-stack.fullname" . | trunc 25 | trimSuffix "-") "-thanos-ruler" -}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/* Shortened name suffixed with thanos-ruler */}}
|
||||
{{- define "kube-prometheus-stack.thanosRuler.name" -}}
|
||||
{{- default (printf "%s-thanos-ruler" (include "kube-prometheus-stack.name" .)) .Values.thanosRuler.name -}}
|
||||
{{- end }}
|
||||
|
||||
{{/* Create chart name and version as used by the chart label. */}}
|
||||
{{- define "kube-prometheus-stack.chartref" -}}
|
||||
{{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}}
|
||||
{{- end }}
|
||||
|
||||
{{/* Generate basic labels */}}
|
||||
{{- define "kube-prometheus-stack.labels" }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/version: "{{ replace "+" "_" .Chart.Version }}"
|
||||
app.kubernetes.io/part-of: {{ template "kube-prometheus-stack.name" . }}
|
||||
chart: {{ template "kube-prometheus-stack.chartref" . }}
|
||||
release: {{ $.Release.Name | quote }}
|
||||
heritage: {{ $.Release.Service | quote }}
|
||||
{{- if .Values.commonLabels}}
|
||||
{{ toYaml .Values.commonLabels }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/* Create the name of kube-prometheus-stack service account to use */}}
|
||||
{{- define "kube-prometheus-stack.operator.serviceAccountName" -}}
|
||||
{{- if .Values.prometheusOperator.serviceAccount.create -}}
|
||||
{{ default (include "kube-prometheus-stack.operator.fullname" .) .Values.prometheusOperator.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.prometheusOperator.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Create the name of kube-prometheus-stack service account to use */}}
|
||||
{{- define "kube-prometheus-stack.operator.admissionWebhooks.serviceAccountName" -}}
|
||||
{{- if .Values.prometheusOperator.serviceAccount.create -}}
|
||||
{{ default (printf "%s-webhook" (include "kube-prometheus-stack.operator.fullname" .)) .Values.prometheusOperator.admissionWebhooks.deployment.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.prometheusOperator.admissionWebhooks.deployment.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Create the name of prometheus service account to use */}}
|
||||
{{- define "kube-prometheus-stack.prometheus.serviceAccountName" -}}
|
||||
{{- if .Values.prometheus.serviceAccount.create -}}
|
||||
{{ default (print (include "kube-prometheus-stack.fullname" .) "-prometheus") .Values.prometheus.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.prometheus.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Create the name of alertmanager service account to use */}}
|
||||
{{- define "kube-prometheus-stack.alertmanager.serviceAccountName" -}}
|
||||
{{- if .Values.alertmanager.serviceAccount.create -}}
|
||||
{{ default (print (include "kube-prometheus-stack.fullname" .) "-alertmanager") .Values.alertmanager.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.alertmanager.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
|
||||
{{- end -}}
|
||||
|
||||
{{/* Create the name of thanosRuler service account to use */}}
|
||||
{{- define "kube-prometheus-stack.thanosRuler.serviceAccountName" -}}
|
||||
{{- if .Values.thanosRuler.serviceAccount.create -}}
|
||||
{{ default (include "kube-prometheus-stack.thanosRuler.name" .) .Values.thanosRuler.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.thanosRuler.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
|
||||
*/}}
|
||||
{{- define "kube-prometheus-stack.namespace" -}}
|
||||
{{- if .Values.namespaceOverride -}}
|
||||
{{- .Values.namespaceOverride -}}
|
||||
{{- else -}}
|
||||
{{- .Release.Namespace -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Use the grafana namespace override for multi-namespace deployments in combined charts
|
||||
*/}}
|
||||
{{- define "kube-prometheus-stack-grafana.namespace" -}}
|
||||
{{- if .Values.grafana.namespaceOverride -}}
|
||||
{{- .Values.grafana.namespaceOverride -}}
|
||||
{{- else -}}
|
||||
{{- .Release.Namespace -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Use the Alertmanager namespace override for multi-namespace deployments in combined charts
|
||||
*/}}
|
||||
{{- define "kube-prometheus-stack-alertmanager.namespace" -}}
|
||||
{{- if .Values.alertmanager.namespaceOverride -}}
|
||||
{{- .Values.alertmanager.namespaceOverride -}}
|
||||
{{- else -}}
|
||||
{{- include "kube-prometheus-stack.namespace" . -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Allow kube-state-metrics job name to be overridden
|
||||
*/}}
|
||||
{{- define "kube-prometheus-stack-kube-state-metrics.name" -}}
|
||||
{{- if index .Values "kube-state-metrics" "nameOverride" -}}
|
||||
{{- index .Values "kube-state-metrics" "nameOverride" -}}
|
||||
{{- else -}}
|
||||
{{- print "kube-state-metrics" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Use the kube-state-metrics namespace override for multi-namespace deployments in combined charts
|
||||
*/}}
|
||||
{{- define "kube-prometheus-stack-kube-state-metrics.namespace" -}}
|
||||
{{- if index .Values "kube-state-metrics" "namespaceOverride" -}}
|
||||
{{- index .Values "kube-state-metrics" "namespaceOverride" -}}
|
||||
{{- else -}}
|
||||
{{- .Release.Namespace -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Use the prometheus-node-exporter namespace override for multi-namespace deployments in combined charts
|
||||
*/}}
|
||||
{{- define "kube-prometheus-stack-prometheus-node-exporter.namespace" -}}
|
||||
{{- if index .Values "prometheus-node-exporter" "namespaceOverride" -}}
|
||||
{{- index .Values "prometheus-node-exporter" "namespaceOverride" -}}
|
||||
{{- else -}}
|
||||
{{- .Release.Namespace -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Allow KubeVersion to be overridden. */}}
|
||||
{{- define "kube-prometheus-stack.kubeVersion" -}}
|
||||
{{- default .Capabilities.KubeVersion.Version .Values.kubeVersionOverride -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Get Ingress API Version */}}
|
||||
{{- define "kube-prometheus-stack.ingress.apiVersion" -}}
|
||||
{{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" (include "kube-prometheus-stack.kubeVersion" .)) -}}
|
||||
{{- print "networking.k8s.io/v1" -}}
|
||||
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}}
|
||||
{{- print "networking.k8s.io/v1beta1" -}}
|
||||
{{- else -}}
|
||||
{{- print "extensions/v1beta1" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Check Ingress stability */}}
|
||||
{{- define "kube-prometheus-stack.ingress.isStable" -}}
|
||||
{{- eq (include "kube-prometheus-stack.ingress.apiVersion" .) "networking.k8s.io/v1" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Check Ingress supports pathType */}}
{{/* pathType was added to networking.k8s.io/v1beta1 in Kubernetes 1.18 */}}
{{- define "kube-prometheus-stack.ingress.supportsPathType" -}}
{{- or (eq (include "kube-prometheus-stack.ingress.isStable" .) "true") (and (eq (include "kube-prometheus-stack.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" (include "kube-prometheus-stack.kubeVersion" .))) -}}
{{- end -}}
{{/* Get Policy API Version for PodDisruptionBudget: policy/v1 on >=1.21, else policy/v1beta1. */}}
{{- define "kube-prometheus-stack.pdb.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">= 1.21-0" (include "kube-prometheus-stack.kubeVersion" .)) -}}
{{- print "policy/v1" -}}
{{- else -}}
{{- print "policy/v1beta1" -}}
{{- end -}}
{{- end -}}
{{/*
Get value based on current Kubernetes version.
Args (positional list): 0 = chart context, 1 = semver constraint, 2 = value when the
constraint does NOT match ("old"), 3 = value when it matches ("new"), 4 = explicit user
value that, when non-nil, overrides the version-based choice.
*/}}
{{- define "kube-prometheus-stack.kubeVersionDefaultValue" -}}
{{- $values := index . 0 -}}
{{- $kubeVersion := index . 1 -}}
{{- $old := index . 2 -}}
{{- $new := index . 3 -}}
{{- $default := index . 4 -}}
{{- if kindIs "invalid" $default -}}
{{- /* No user override supplied: pick by cluster version. */ -}}
{{- if semverCompare $kubeVersion (include "kube-prometheus-stack.kubeVersion" $values) -}}
{{- print $new -}}
{{- else -}}
{{- print $old -}}
{{- end -}}
{{- else -}}
{{- /* Right-chomp added: the unchomped form leaked a trailing newline into the value. */ -}}
{{- print $default -}}
{{- end -}}
{{- end -}}
{{/* Get value for kube-controller-manager depending on insecure scraping availability (insecure port removed in 1.22). */}}
{{- define "kube-prometheus-stack.kubeControllerManager.insecureScrape" -}}
{{- $values := index . 0 -}}
{{- $insecure := index . 1 -}}
{{- $secure := index . 2 -}}
{{- $userValue := index . 3 -}}
{{- include "kube-prometheus-stack.kubeVersionDefaultValue" (list $values ">= 1.22-0" $insecure $secure $userValue) -}}
{{- end -}}
{{/* Get value for kube-scheduler depending on insecure scraping availability (insecure port removed in 1.23). */}}
{{- define "kube-prometheus-stack.kubeScheduler.insecureScrape" -}}
{{- $values := index . 0 -}}
{{- $insecure := index . 1 -}}
{{- $secure := index . 2 -}}
{{- $userValue := index . 3 -}}
{{- include "kube-prometheus-stack.kubeVersionDefaultValue" (list $values ">= 1.23-0" $insecure $secure $userValue) -}}
{{- end -}}
{{/* Sets default scrape limits for servicemonitor. Emits only the keys that are set; callers indent via nindent. */}}
{{- define "servicemonitor.scrapeLimits" -}}
{{- with .sampleLimit }}
sampleLimit: {{ . }}
{{- end }}
{{- with .targetLimit }}
targetLimit: {{ . }}
{{- end }}
{{- with .labelLimit }}
labelLimit: {{ . }}
{{- end }}
{{- with .labelNameLengthLimit }}
labelNameLengthLimit: {{ . }}
{{- end }}
{{- with .labelValueLengthLimit }}
labelValueLengthLimit: {{ . }}
{{- end }}
{{- end -}}
{{/*
To help compatibility with other charts which use global.imagePullSecrets.
Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style).
global:
  imagePullSecrets:
  - name: pullSecret1
  - name: pullSecret2

or

global:
  imagePullSecrets:
  - pullSecret1
  - pullSecret2
*/}}
{{- define "kube-prometheus-stack.imagePullSecrets" -}}
{{- range .Values.global.imagePullSecrets }}
{{- if eq (typeOf .) "map[string]interface {}" }}
- {{ toYaml . | trim }}
{{- else }}
- name: {{ . }}
{{- end }}
{{- end }}
{{- end -}}
{{- /* Newline-separated DNS names for the admission-webhook TLS cert; webhook-service names are added only when the standalone webhook deployment is enabled. Output is whitespace-sensitive (one name per line). */}}
{{- define "kube-prometheus-stack.operator.admission-webhook.dnsNames" }}
{{- $fullname := include "kube-prometheus-stack.operator.fullname" . }}
{{- $namespace := include "kube-prometheus-stack.namespace" . }}
{{- $fullname }}
{{ $fullname }}.{{ $namespace }}.svc
{{- if .Values.prometheusOperator.admissionWebhooks.deployment.enabled }}
{{ $fullname }}-webhook
{{ $fullname }}-webhook.{{ $namespace }}.svc
{{- end }}
{{- end }}
{{/* To help configure the kubelet servicemonitor for http or https. */}}
{{- define "kube-prometheus-stack.kubelet.scheme" }}
{{- if .Values.kubelet.serviceMonitor.https }}https{{ else }}http{{ end }}
{{- end }}
{{- /* tlsConfig + bearer token for HTTPS kubelet scraping; emits nothing in plain-HTTP mode. */}}
{{- define "kube-prometheus-stack.kubelet.authConfig" }}
{{- if .Values.kubelet.serviceMonitor.https }}
tlsConfig:
  caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  insecureSkipVerify: {{ .Values.kubelet.serviceMonitor.insecureSkipVerify }}
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
{{- end }}
{{- end }}
{{/* To help configure anti-affinity rules for Prometheus pods; the name selector differs in agent mode. */}}
{{- define "kube-prometheus-stack.prometheus.pod-anti-affinity.matchExpressions" }}
{{- if .Values.prometheus.agentMode }}
- {key: app.kubernetes.io/name, operator: In, values: [prometheus-agent]}
- {key: app.kubernetes.io/instance, operator: In, values: [{{ template "kube-prometheus-stack.prometheus.crname" . }}]}
{{- else }}
- {key: app.kubernetes.io/name, operator: In, values: [prometheus]}
- {key: app.kubernetes.io/instance, operator: In, values: [{{ template "kube-prometheus-stack.prometheus.crname" . }}]}
{{- end }}
{{- end }}
{{- /* Alertmanager custom resource managed by the prometheus-operator; rendered only when alertmanager is enabled. */ -}}
{{- if .Values.alertmanager.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: Alertmanager
metadata:
  name: {{ template "kube-prometheus-stack.alertmanager.crname" . }}
  namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" . }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
{{- if .Values.alertmanager.annotations }}
  annotations:
{{ toYaml .Values.alertmanager.annotations | indent 4 }}
{{- end }}
spec:
{{- if .Values.alertmanager.alertmanagerSpec.image }}
  {{- /* global.imageRegistry takes precedence over the per-image registry. */}}
  {{- $registry := .Values.global.imageRegistry | default .Values.alertmanager.alertmanagerSpec.image.registry -}}
  {{- if and .Values.alertmanager.alertmanagerSpec.image.tag .Values.alertmanager.alertmanagerSpec.image.sha }}
  image: "{{ $registry }}/{{ .Values.alertmanager.alertmanagerSpec.image.repository }}:{{ .Values.alertmanager.alertmanagerSpec.image.tag }}@sha256:{{ .Values.alertmanager.alertmanagerSpec.image.sha }}"
  {{- else if .Values.alertmanager.alertmanagerSpec.image.sha }}
  image: "{{ $registry }}/{{ .Values.alertmanager.alertmanagerSpec.image.repository }}@sha256:{{ .Values.alertmanager.alertmanagerSpec.image.sha }}"
  {{- else if .Values.alertmanager.alertmanagerSpec.image.tag }}
  image: "{{ $registry }}/{{ .Values.alertmanager.alertmanagerSpec.image.repository }}:{{ .Values.alertmanager.alertmanagerSpec.image.tag }}"
  {{- else }}
  image: "{{ $registry }}/{{ .Values.alertmanager.alertmanagerSpec.image.repository }}"
  {{- end }}
  version: {{ default .Values.alertmanager.alertmanagerSpec.image.tag .Values.alertmanager.alertmanagerSpec.version }}
  {{- if .Values.alertmanager.alertmanagerSpec.image.sha }}
  sha: {{ .Values.alertmanager.alertmanagerSpec.image.sha }}
  {{- end }}
{{- end }}
  replicas: {{ .Values.alertmanager.alertmanagerSpec.replicas }}
  listenLocal: {{ .Values.alertmanager.alertmanagerSpec.listenLocal }}
  {{- if .Values.alertmanager.alertmanagerSpec.serviceName }}
  serviceName: {{ tpl .Values.alertmanager.alertmanagerSpec.serviceName . }}
  {{- end }}
  serviceAccountName: {{ template "kube-prometheus-stack.alertmanager.serviceAccountName" . }}
  automountServiceAccountToken: {{ .Values.alertmanager.alertmanagerSpec.automountServiceAccountToken }}
{{- if .Values.alertmanager.alertmanagerSpec.externalUrl }}
  externalUrl: "{{ tpl .Values.alertmanager.alertmanagerSpec.externalUrl . }}"
{{- else if and .Values.alertmanager.ingress.enabled .Values.alertmanager.ingress.hosts }}
  externalUrl: "http://{{ tpl (index .Values.alertmanager.ingress.hosts 0) . }}{{ .Values.alertmanager.alertmanagerSpec.routePrefix }}"
{{- else }}
  externalUrl: http://{{ template "kube-prometheus-stack.fullname" . }}-alertmanager.{{ template "kube-prometheus-stack.namespace" . }}:{{ .Values.alertmanager.service.port }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.nodeSelector }}
  nodeSelector:
{{ toYaml .Values.alertmanager.alertmanagerSpec.nodeSelector | indent 4 }}
{{- end }}
  paused: {{ .Values.alertmanager.alertmanagerSpec.paused }}
  logFormat: {{ .Values.alertmanager.alertmanagerSpec.logFormat | quote }}
  logLevel: {{ .Values.alertmanager.alertmanagerSpec.logLevel | quote }}
  retention: {{ .Values.alertmanager.alertmanagerSpec.retention | quote }}
  {{- with .Values.alertmanager.enableFeatures }}
  enableFeatures:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.secrets }}
  secrets:
{{ toYaml .Values.alertmanager.alertmanagerSpec.secrets | indent 4 }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.configSecret }}
  configSecret: {{ .Values.alertmanager.alertmanagerSpec.configSecret }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.configMaps }}
  configMaps:
{{ toYaml .Values.alertmanager.alertmanagerSpec.configMaps | indent 4 }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.alertmanagerConfigSelector }}
  alertmanagerConfigSelector:
{{ tpl (toYaml .Values.alertmanager.alertmanagerSpec.alertmanagerConfigSelector | indent 4) . }}
{{ else }}
  alertmanagerConfigSelector: {}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.alertmanagerConfigNamespaceSelector }}
  alertmanagerConfigNamespaceSelector:
{{ tpl (toYaml .Values.alertmanager.alertmanagerSpec.alertmanagerConfigNamespaceSelector | indent 4) . }}
{{ else }}
  alertmanagerConfigNamespaceSelector: {}
{{- end }}
  {{- if .Values.alertmanager.alertmanagerSpec.web }}
  web:
{{ toYaml .Values.alertmanager.alertmanagerSpec.web | indent 4 }}
  {{- end }}
  {{- if .Values.alertmanager.alertmanagerSpec.alertmanagerConfiguration }}
  alertmanagerConfiguration:
{{ toYaml .Values.alertmanager.alertmanagerSpec.alertmanagerConfiguration | indent 4 }}
  {{- end }}
  {{- if .Values.alertmanager.alertmanagerSpec.alertmanagerConfigMatcherStrategy }}
  alertmanagerConfigMatcherStrategy:
{{ toYaml .Values.alertmanager.alertmanagerSpec.alertmanagerConfigMatcherStrategy | indent 4 }}
  {{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.resources }}
  resources:
{{ toYaml .Values.alertmanager.alertmanagerSpec.resources | indent 4 }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.routePrefix }}
  routePrefix: "{{ .Values.alertmanager.alertmanagerSpec.routePrefix }}"
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.securityContext }}
  securityContext:
{{ toYaml .Values.alertmanager.alertmanagerSpec.securityContext | indent 4 }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.storage }}
  storage:
{{ tpl (toYaml .Values.alertmanager.alertmanagerSpec.storage | indent 4) . }}
{{- end }}
  {{- with .Values.alertmanager.alertmanagerSpec.persistentVolumeClaimRetentionPolicy }}
  persistentVolumeClaimRetentionPolicy:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.podMetadata }}
  podMetadata:
{{ toYaml .Values.alertmanager.alertmanagerSpec.podMetadata | indent 4 }}
{{- end }}
{{- /* The affinity: key is emitted once if either a custom affinity or a podAntiAffinity preset is requested. */}}
{{- if or .Values.alertmanager.alertmanagerSpec.podAntiAffinity .Values.alertmanager.alertmanagerSpec.affinity }}
  affinity:
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.affinity }}
{{ toYaml .Values.alertmanager.alertmanagerSpec.affinity | indent 4 }}
{{- end }}
{{- if eq .Values.alertmanager.alertmanagerSpec.podAntiAffinity "hard" }}
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - topologyKey: {{ .Values.alertmanager.alertmanagerSpec.podAntiAffinityTopologyKey }}
          labelSelector:
            matchExpressions:
              - {key: app.kubernetes.io/name, operator: In, values: [alertmanager]}
              - {key: alertmanager, operator: In, values: [{{ template "kube-prometheus-stack.alertmanager.crname" . }}]}
{{- else if eq .Values.alertmanager.alertmanagerSpec.podAntiAffinity "soft" }}
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            topologyKey: {{ .Values.alertmanager.alertmanagerSpec.podAntiAffinityTopologyKey }}
            labelSelector:
              matchExpressions:
                - {key: app.kubernetes.io/name, operator: In, values: [alertmanager]}
                - {key: alertmanager, operator: In, values: [{{ template "kube-prometheus-stack.alertmanager.crname" . }}]}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.tolerations }}
  tolerations:
{{ toYaml .Values.alertmanager.alertmanagerSpec.tolerations | indent 4 }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.topologySpreadConstraints }}
  topologySpreadConstraints:
{{ toYaml .Values.alertmanager.alertmanagerSpec.topologySpreadConstraints | indent 4 }}
{{- end }}
{{- if .Values.global.imagePullSecrets }}
  imagePullSecrets:
{{ include "kube-prometheus-stack.imagePullSecrets" . | trim | indent 4 }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.containers }}
  containers:
{{ toYaml .Values.alertmanager.alertmanagerSpec.containers | indent 4 }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.initContainers }}
  initContainers:
{{ toYaml .Values.alertmanager.alertmanagerSpec.initContainers | indent 4 }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.priorityClassName }}
  priorityClassName: {{ .Values.alertmanager.alertmanagerSpec.priorityClassName }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.additionalPeers }}
  additionalPeers:
{{ toYaml .Values.alertmanager.alertmanagerSpec.additionalPeers | indent 4 }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.volumes }}
  volumes:
{{ toYaml .Values.alertmanager.alertmanagerSpec.volumes | indent 4 }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.volumeMounts }}
  volumeMounts:
{{ toYaml .Values.alertmanager.alertmanagerSpec.volumeMounts | indent 4 }}
{{- end }}
  portName: {{ .Values.alertmanager.alertmanagerSpec.portName }}
{{- if .Values.alertmanager.alertmanagerSpec.clusterAdvertiseAddress }}
  clusterAdvertiseAddress: {{ .Values.alertmanager.alertmanagerSpec.clusterAdvertiseAddress }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.clusterGossipInterval }}
  clusterGossipInterval: {{ .Values.alertmanager.alertmanagerSpec.clusterGossipInterval }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.clusterPeerTimeout }}
  clusterPeerTimeout: {{ .Values.alertmanager.alertmanagerSpec.clusterPeerTimeout }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.clusterPushpullInterval }}
  clusterPushpullInterval: {{ .Values.alertmanager.alertmanagerSpec.clusterPushpullInterval }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.clusterLabel }}
  clusterLabel: {{ .Values.alertmanager.alertmanagerSpec.clusterLabel }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.forceEnableClusterMode }}
  forceEnableClusterMode: {{ .Values.alertmanager.alertmanagerSpec.forceEnableClusterMode }}
{{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.minReadySeconds }}
  minReadySeconds: {{ .Values.alertmanager.alertmanagerSpec.minReadySeconds }}
{{- end }}
{{- /* Escape hatches: arbitrary extra spec fields, as a map or as a template string. */}}
{{- with .Values.alertmanager.alertmanagerSpec.additionalConfig }}
  {{- tpl (toYaml .) $ | nindent 2 }}
{{- end }}
{{- with .Values.alertmanager.alertmanagerSpec.additionalConfigString }}
  {{- tpl . $ | nindent 2 }}
{{- end }}
{{- end }}
{{- /* Optional user-supplied extra Secret mounted alongside Alertmanager; values are base64-encoded here. */ -}}
{{- if .Values.alertmanager.extraSecret.data -}}
{{- $secretName := printf "alertmanager-%s-extra" (include "kube-prometheus-stack.fullname" . ) -}}
apiVersion: v1
kind: Secret
metadata:
  name: {{ default $secretName .Values.alertmanager.extraSecret.name }}
  namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" . }}
{{- if .Values.alertmanager.extraSecret.annotations }}
  annotations:
{{ toYaml .Values.alertmanager.extraSecret.annotations | indent 4 }}
{{- end }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
    app.kubernetes.io/component: alertmanager
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
data:
{{- range $key, $val := .Values.alertmanager.extraSecret.data }}
  {{ $key }}: {{ $val | b64enc | quote }}
{{- end }}
{{- end }}
{{- /* Ingress for the Alertmanager service; supports networking.k8s.io/v1, v1beta1 and legacy extensions/v1beta1 backends. */ -}}
{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled }}
{{- $pathType := .Values.alertmanager.ingress.pathType | default "ImplementationSpecific" }}
{{- $serviceName := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "alertmanager" }}
{{- $backendServiceName := .Values.alertmanager.ingress.serviceName | default (printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "alertmanager") }}
{{- $servicePort := .Values.alertmanager.ingress.servicePort | default .Values.alertmanager.service.port -}}
{{- /* Default paths to the configured routePrefix (wrapped in a list) when no explicit paths are given. */}}
{{- $routePrefix := list .Values.alertmanager.alertmanagerSpec.routePrefix }}
{{- $paths := .Values.alertmanager.ingress.paths | default $routePrefix -}}
{{- $apiIsStable := eq (include "kube-prometheus-stack.ingress.isStable" .) "true" -}}
{{- $ingressSupportsPathType := eq (include "kube-prometheus-stack.ingress.supportsPathType" .) "true" -}}
apiVersion: {{ include "kube-prometheus-stack.ingress.apiVersion" . }}
kind: Ingress
metadata:
  name: {{ $serviceName }}
  namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" . }}
{{- if .Values.alertmanager.ingress.annotations }}
  annotations:
    {{- tpl (toYaml .Values.alertmanager.ingress.annotations) . | nindent 4 }}
{{- end }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
{{- if .Values.alertmanager.ingress.labels }}
{{ toYaml .Values.alertmanager.ingress.labels | indent 4 }}
{{- end }}
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
spec:
  {{- if $apiIsStable }}
  {{- if .Values.alertmanager.ingress.ingressClassName }}
  ingressClassName: {{ .Values.alertmanager.ingress.ingressClassName }}
  {{- end }}
  {{- end }}
  rules:
  {{- if .Values.alertmanager.ingress.hosts }}
  {{- range $host := .Values.alertmanager.ingress.hosts }}
    - host: {{ tpl $host $ | quote }}
      http:
        paths:
  {{- range $p := $paths }}
          - path: {{ tpl $p $ }}
            {{- if and $pathType $ingressSupportsPathType }}
            pathType: {{ $pathType }}
            {{- end }}
            backend:
              {{- if $apiIsStable }}
              service:
                name: {{ $backendServiceName }}
                port:
                  number: {{ $servicePort }}
              {{- else }}
              serviceName: {{ $backendServiceName }}
              servicePort: {{ $servicePort }}
              {{- end }}
  {{- end -}}
  {{- end -}}
  {{- else }}
    - http:
        paths:
  {{- range $p := $paths }}
          - path: {{ tpl $p $ }}
            {{- if and $pathType $ingressSupportsPathType }}
            pathType: {{ $pathType }}
            {{- end }}
            backend:
              {{- if $apiIsStable }}
              service:
                name: {{ $backendServiceName }}
                port:
                  number: {{ $servicePort }}
              {{- else }}
              serviceName: {{ $backendServiceName }}
              servicePort: {{ $servicePort }}
              {{- end }}
  {{- end -}}
  {{- end -}}
  {{- if .Values.alertmanager.ingress.tls }}
  tls:
{{ tpl (toYaml .Values.alertmanager.ingress.tls | indent 4) . }}
  {{- end -}}
{{- end -}}
{{- /* One Ingress per Alertmanager replica (paired with servicePerReplica), emitted as a v1 List. */ -}}
{{- if and .Values.alertmanager.enabled .Values.alertmanager.servicePerReplica.enabled .Values.alertmanager.ingressPerReplica.enabled }}
{{- $pathType := .Values.alertmanager.ingressPerReplica.pathType | default "" }}
{{- $count := .Values.alertmanager.alertmanagerSpec.replicas | int -}}
{{- $servicePort := .Values.alertmanager.service.port -}}
{{- $ingressValues := .Values.alertmanager.ingressPerReplica -}}
{{- $apiIsStable := eq (include "kube-prometheus-stack.ingress.isStable" .) "true" -}}
{{- $ingressSupportsPathType := eq (include "kube-prometheus-stack.ingress.supportsPathType" .) "true" -}}
apiVersion: v1
kind: List
metadata:
  name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-ingressperreplica
  namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" . }}
items:
{{ range $i, $e := until $count }}
  - kind: Ingress
    apiVersion: {{ include "kube-prometheus-stack.ingress.apiVersion" $ }}
    metadata:
      name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }}
      {{- /* Use the alertmanager namespace override, consistent with every other alertmanager resource. */}}
      namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" $ }}
      labels:
        app: {{ include "kube-prometheus-stack.name" $ }}-alertmanager
{{ include "kube-prometheus-stack.labels" $ | indent 8 }}
      {{- if $ingressValues.labels }}
{{ toYaml $ingressValues.labels | indent 8 }}
      {{- end }}
      {{- if $ingressValues.annotations }}
      annotations:
        {{- tpl (toYaml $ingressValues.annotations) $ | nindent 8 }}
      {{- end }}
    spec:
      {{- if $apiIsStable }}
      {{- if $ingressValues.ingressClassName }}
      ingressClassName: {{ $ingressValues.ingressClassName }}
      {{- end }}
      {{- end }}
      rules:
        - host: {{ $ingressValues.hostPrefix }}-{{ $i }}.{{ $ingressValues.hostDomain }}
          http:
            paths:
        {{- range $p := $ingressValues.paths }}
              - path: {{ tpl $p $ }}
                {{- if and $pathType $ingressSupportsPathType }}
                pathType: {{ $pathType }}
                {{- end }}
                backend:
                  {{- if $apiIsStable }}
                  service:
                    name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }}
                    port:
                      number: {{ $servicePort }}
                  {{- else }}
                  serviceName: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }}
                  servicePort: {{ $servicePort }}
                  {{- end }}
        {{- end -}}
      {{- if or $ingressValues.tlsSecretName $ingressValues.tlsSecretPerReplica.enabled }}
      tls:
        - hosts:
            - {{ $ingressValues.hostPrefix }}-{{ $i }}.{{ $ingressValues.hostDomain }}
          {{- if $ingressValues.tlsSecretPerReplica.enabled }}
          secretName: {{ $ingressValues.tlsSecretPerReplica.prefix }}-{{ $i }}
          {{- else }}
          secretName: {{ $ingressValues.tlsSecretName }}
          {{- end }}
      {{- end }}
{{- end -}}
{{- end -}}
{{- /* NetworkPolicy restricting ingress to the Alertmanager pods; rule groups toggled individually via values. */ -}}
{{- if and .Values.alertmanager.enabled .Values.alertmanager.networkPolicy.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager
  namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" . }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
    {{- include "kube-prometheus-stack.labels" . | nindent 4 }}
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/name: alertmanager
  policyTypes:
    {{- toYaml .Values.alertmanager.networkPolicy.policyTypes | nindent 4 }}
  ingress:
    {{- if and (.Values.alertmanager.networkPolicy.gateway.namespace) (.Values.alertmanager.networkPolicy.gateway.podLabels) }}
    # Allow ingress from gateway
    - from:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: {{ .Values.alertmanager.networkPolicy.gateway.namespace }}
          {{- if and .Values.alertmanager.networkPolicy.gateway.podLabels (not (empty .Values.alertmanager.networkPolicy.gateway.podLabels)) }}
          podSelector:
            matchLabels:
              {{- toYaml .Values.alertmanager.networkPolicy.gateway.podLabels | nindent 14 }}
          {{- end }}
      ports:
        - port: {{ .Values.alertmanager.service.port }}
          protocol: TCP
    {{- end }}
    {{- if .Values.alertmanager.networkPolicy.monitoringRules.prometheus }}
    # Allow ingress from Prometheus
    - from:
        - podSelector:
            matchLabels:
              app.kubernetes.io/name: prometheus
      ports:
        - port: {{ .Values.alertmanager.service.port }}
          protocol: TCP
    {{- end }}
    {{- if and (.Values.alertmanager.networkPolicy.enableClusterRules) (.Values.alertmanager.service.clusterPort) }}
    # Allow ingress from other Alertmanager pods (for clustering)
    - from:
        - podSelector:
            matchLabels:
              app.kubernetes.io/name: alertmanager
      ports:
        - port: {{ .Values.alertmanager.service.clusterPort }}
          protocol: TCP
    {{- end }}
    {{- if .Values.alertmanager.networkPolicy.monitoringRules.configReloader }}
    # Allow ingress for config reloader metrics
    - from:
        - podSelector:
            matchLabels:
              app.kubernetes.io/name: alertmanager
              component: config-reloader
      ports:
        - port: 8080
          protocol: TCP
    {{- end }}
    {{- with .Values.alertmanager.networkPolicy.additionalIngress }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- if .Values.alertmanager.networkPolicy.egress.enabled }}
  egress:
    {{- with .Values.alertmanager.networkPolicy.egress.rules }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- end }}
{{- end }}
{{- /* PodDisruptionBudget for Alertmanager; minAvailable and maxUnavailable are mutually exclusive in practice — only set ones are emitted. */ -}}
{{- if and .Values.alertmanager.enabled .Values.alertmanager.podDisruptionBudget.enabled }}
apiVersion: {{ include "kube-prometheus-stack.pdb.apiVersion" . }}
kind: PodDisruptionBudget
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager
  namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" . }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
spec:
  {{- if .Values.alertmanager.podDisruptionBudget.minAvailable }}
  minAvailable: {{ .Values.alertmanager.podDisruptionBudget.minAvailable }}
  {{- end }}
  {{- if .Values.alertmanager.podDisruptionBudget.maxUnavailable }}
  maxUnavailable: {{ .Values.alertmanager.podDisruptionBudget.maxUnavailable }}
  {{- end }}
  selector:
    matchLabels:
      app.kubernetes.io/name: alertmanager
      alertmanager: {{ template "kube-prometheus-stack.alertmanager.crname" . }}
{{- end }}
{{- /* Role granting 'use' on the Alertmanager PSP; only rendered where the PSP API still exists (clusters < 1.25). */ -}}
{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager
  namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" . }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
rules:
{{- /* PSPs moved from 'extensions' to the 'policy' API group after 1.15. */}}
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
{{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }}
- apiGroups: ['policy']
{{- else }}
- apiGroups: ['extensions']
{{- end }}
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames:
  - {{ template "kube-prometheus-stack.fullname" . }}-alertmanager
{{- end }}
{{- end }}
{{- /* Binds the Alertmanager service account to the PSP Role above; same PSP-era guards. */ -}}
{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager
  namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" . }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager
subjects:
  - kind: ServiceAccount
    name: {{ template "kube-prometheus-stack.alertmanager.serviceAccountName" . }}
    namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" . }}
{{- end }}
{{- end }}
{{- /* PodSecurityPolicy for Alertmanager pods (cluster-scoped, so no namespace); only on clusters that still serve the PSP API. */ -}}
{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
{{- if .Values.global.rbac.pspAnnotations }}
  annotations:
{{ toYaml .Values.global.rbac.pspAnnotations | indent 4 }}
{{- end }}
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
spec:
  privileged: false
  # Allow core volume types.
  volumes:
    - 'configMap'
    - 'emptyDir'
    - 'projected'
    - 'secret'
    - 'downwardAPI'
    - 'persistentVolumeClaim'
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    # Permits the container to run with root privileges as well.
    rule: 'RunAsAny'
  seLinux:
    # This policy assumes the nodes are using AppArmor rather than SELinux.
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
      # Allow adding the root group.
      - min: 0
        max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
      # Allow adding the root group.
      - min: 0
        max: 65535
  readOnlyRootFilesystem: false
{{- end }}
{{- end }}
@ -0,0 +1,56 @@
|
||||
{{- /* Gateway API routes (HTTPRoute by default) for Alertmanager; one document per enabled entry under .Values.alertmanager.route. */ -}}
{{- if .Values.alertmanager.enabled -}}
{{- $serviceName := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "alertmanager" }}
{{- $servicePort := .Values.alertmanager.ingress.servicePort | default .Values.alertmanager.service.port -}}
{{- range $name, $route := .Values.alertmanager.route }}
{{- if $route.enabled }}
---
apiVersion: {{ $route.apiVersion | default "gateway.networking.k8s.io/v1" }}
kind: {{ $route.kind | default "HTTPRoute" }}
metadata:
  {{- with $route.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  name: {{ $serviceName }}{{ if ne $name "main" }}-{{ $name }}{{ end }}
  namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" $ }}
  labels:
    app: {{ template "kube-prometheus-stack.name" $ }}-alertmanager
    {{- include "kube-prometheus-stack.labels" $ | nindent 4 }}
    {{- with $route.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  {{- with $route.parentRefs }}
  parentRefs:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- with $route.hostnames }}
  hostnames:
    {{- tpl (toYaml .) $ | nindent 4 }}
  {{- end }}
  rules:
  {{- if $route.additionalRules }}
  {{- tpl (toYaml $route.additionalRules) $ | nindent 4 }}
  {{- end }}
  {{- if $route.httpsRedirect }}
  # A redirect-only rule; no backend is attached in this mode.
  - filters:
    - type: RequestRedirect
      requestRedirect:
        scheme: https
        statusCode: 301
  {{- else }}
  - backendRefs:
    - name: {{ $serviceName }}
      port: {{ $servicePort }}
    {{- with $route.filters }}
    filters:
      {{- toYaml . | nindent 8 }}
    {{- end }}
    {{- with $route.matches }}
    matches:
      {{- toYaml . | nindent 8 }}
    {{- end }}
  {{- end }}
{{- end }}
{{- end }}
{{- end }}
|
||||
@ -0,0 +1,29 @@
|
||||
{{- /* Alertmanager configuration Secret; skipped entirely when an existing secret is referenced via alertmanagerSpec.useExistingSecret. */ -}}
{{- if and (.Values.alertmanager.enabled) (not .Values.alertmanager.alertmanagerSpec.useExistingSecret) }}
apiVersion: v1
kind: Secret
metadata:
  name: alertmanager-{{ template "kube-prometheus-stack.alertmanager.crname" . }}
  namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" . }}
{{- if .Values.alertmanager.secret.annotations }}
  annotations:
{{ toYaml .Values.alertmanager.secret.annotations | indent 4 }}
{{- end }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
data:
{{- if .Values.alertmanager.tplConfig }}
{{- if .Values.alertmanager.stringConfig }}
  alertmanager.yaml: {{ tpl (.Values.alertmanager.stringConfig) . | b64enc | quote }}
{{- else if eq (typeOf .Values.alertmanager.config) "string" }}
  alertmanager.yaml: {{ tpl (.Values.alertmanager.config) . | b64enc | quote }}
{{- else }}
  alertmanager.yaml: {{ tpl (toYaml .Values.alertmanager.config) . | b64enc | quote }}
{{- end }}
{{- else }}
  alertmanager.yaml: {{ toYaml .Values.alertmanager.config | b64enc | quote }}
{{- end }}
{{- range $key, $val := .Values.alertmanager.templateFiles }}
  {{ $key }}: {{ $val | b64enc | quote }}
{{- end }}
{{- end }}
|
||||
@ -0,0 +1,72 @@
|
||||
{{- /* Service fronting the Alertmanager StatefulSet pods; exposes the main web port plus the config-reloader metrics port. */ -}}
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
{{- if and .Values.alertmanager.enabled .Values.alertmanager.service.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager
  namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" . }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
    self-monitor: {{ .Values.alertmanager.serviceMonitor.selfMonitor | quote }}
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
{{- if .Values.alertmanager.service.labels }}
{{ toYaml .Values.alertmanager.service.labels | indent 4 }}
{{- end }}
{{- if .Values.alertmanager.service.annotations }}
  annotations:
{{ toYaml .Values.alertmanager.service.annotations | indent 4 }}
{{- end }}
spec:
{{- if .Values.alertmanager.service.clusterIP }}
  clusterIP: {{ .Values.alertmanager.service.clusterIP }}
{{- end }}
{{- if .Values.alertmanager.service.externalIPs }}
  externalIPs:
{{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.alertmanager.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }}
{{- end }}
{{- if .Values.alertmanager.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
  {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }}
    - {{ $cidr }}
  {{- end }}
{{- end }}
{{- if ne .Values.alertmanager.service.type "ClusterIP" }}
  externalTrafficPolicy: {{ .Values.alertmanager.service.externalTrafficPolicy }}
{{- end }}
  ports:
  - name: {{ .Values.alertmanager.alertmanagerSpec.portName }}
{{- if eq .Values.alertmanager.service.type "NodePort" }}
    nodePort: {{ .Values.alertmanager.service.nodePort }}
{{- end }}
    port: {{ .Values.alertmanager.service.port }}
    targetPort: {{ .Values.alertmanager.service.targetPort }}
    protocol: TCP
  - name: reloader-web
    {{- if semverCompare ">=1.20.0-0" $kubeTargetVersion }}
    # appProtocol requires Kubernetes >= 1.20.
    appProtocol: http
    {{- end }}
    port: 8080
    targetPort: reloader-web
{{- if .Values.alertmanager.service.additionalPorts }}
{{ toYaml .Values.alertmanager.service.additionalPorts | indent 2 }}
{{- end }}
  selector:
    app.kubernetes.io/name: alertmanager
    alertmanager: {{ template "kube-prometheus-stack.alertmanager.crname" . }}
{{- if .Values.alertmanager.service.sessionAffinity }}
  sessionAffinity: {{ .Values.alertmanager.service.sessionAffinity }}
{{- end }}
{{- if eq .Values.alertmanager.service.sessionAffinity "ClientIP" }}
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: {{ .Values.alertmanager.service.sessionAffinityConfig.clientIP.timeoutSeconds }}
{{- end }}
  type: "{{ .Values.alertmanager.service.type }}"
{{- if .Values.alertmanager.service.ipDualStack.enabled }}
  ipFamilies: {{ toYaml .Values.alertmanager.service.ipDualStack.ipFamilies | nindent 4 }}
  ipFamilyPolicy: {{ .Values.alertmanager.service.ipDualStack.ipFamilyPolicy }}
{{- end }}
{{- end }}
|
||||
@ -0,0 +1,21 @@
|
||||
{{- /* ServiceAccount for the Alertmanager pods; created only when serviceAccount.create is true. */ -}}
{{- if and .Values.alertmanager.enabled .Values.alertmanager.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "kube-prometheus-stack.alertmanager.serviceAccountName" . }}
  namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" . }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
    app.kubernetes.io/name: {{ template "kube-prometheus-stack.name" . }}-alertmanager
    app.kubernetes.io/component: alertmanager
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
{{- if .Values.alertmanager.serviceAccount.annotations }}
  annotations:
{{ toYaml .Values.alertmanager.serviceAccount.annotations | indent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.alertmanager.serviceAccount.automountServiceAccountToken }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
{{ include "kube-prometheus-stack.imagePullSecrets" . | trim | indent 2}}
{{- end }}
{{- end }}
|
||||
@ -0,0 +1,93 @@
|
||||
{{- /* Self-monitoring ServiceMonitor for Alertmanager: scrapes the main web port, the config-reloader port, and any user-defined additionalEndpoints. */ -}}
{{- if and .Values.alertmanager.enabled .Values.alertmanager.serviceMonitor.selfMonitor }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager
  namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" . }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
    {{- with .Values.alertmanager.serviceMonitor.additionalLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  {{- include "servicemonitor.scrapeLimits" .Values.alertmanager.serviceMonitor | nindent 2 }}
  selector:
    matchLabels:
      app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
      release: {{ $.Release.Name | quote }}
      self-monitor: "true"
  namespaceSelector:
    matchNames:
      - {{ printf "%s" (include "kube-prometheus-stack.namespace" .) | quote }}
  endpoints:
  # Main Alertmanager web endpoint.
  - port: {{ .Values.alertmanager.alertmanagerSpec.portName }}
    enableHttp2: {{ .Values.alertmanager.serviceMonitor.enableHttp2 }}
    {{- if .Values.alertmanager.serviceMonitor.interval }}
    interval: {{ .Values.alertmanager.serviceMonitor.interval }}
    {{- end }}
    {{- if .Values.alertmanager.serviceMonitor.proxyUrl }}
    proxyUrl: {{ .Values.alertmanager.serviceMonitor.proxyUrl}}
    {{- end }}
    {{- if .Values.alertmanager.serviceMonitor.scheme }}
    scheme: {{ .Values.alertmanager.serviceMonitor.scheme }}
    {{- end }}
    {{- if .Values.alertmanager.serviceMonitor.bearerTokenFile }}
    bearerTokenFile: {{ .Values.alertmanager.serviceMonitor.bearerTokenFile }}
    {{- end }}
    {{- if .Values.alertmanager.serviceMonitor.tlsConfig }}
    tlsConfig: {{- toYaml .Values.alertmanager.serviceMonitor.tlsConfig | nindent 6 }}
    {{- end }}
    path: "{{ trimSuffix "/" .Values.alertmanager.alertmanagerSpec.routePrefix }}/metrics"
    {{- if .Values.alertmanager.serviceMonitor.metricRelabelings }}
    metricRelabelings: {{- tpl (toYaml .Values.alertmanager.serviceMonitor.metricRelabelings | nindent 6) . }}
    {{- end }}
    {{- if .Values.alertmanager.serviceMonitor.relabelings }}
    relabelings: {{- toYaml .Values.alertmanager.serviceMonitor.relabelings | nindent 6 }}
    {{- end }}
  # Config-reloader sidecar metrics endpoint.
  - port: reloader-web
    {{- if .Values.alertmanager.serviceMonitor.interval }}
    interval: {{ .Values.alertmanager.serviceMonitor.interval }}
    {{- end }}
    {{- if .Values.alertmanager.serviceMonitor.proxyUrl }}
    proxyUrl: {{ .Values.alertmanager.serviceMonitor.proxyUrl}}
    {{- end }}
    {{- if .Values.alertmanager.serviceMonitor.scheme }}
    scheme: {{ .Values.alertmanager.serviceMonitor.scheme }}
    {{- end }}
    {{- if .Values.alertmanager.serviceMonitor.tlsConfig }}
    tlsConfig: {{- toYaml .Values.alertmanager.serviceMonitor.tlsConfig | nindent 6 }}
    {{- end }}
    path: "/metrics"
    {{- if .Values.alertmanager.serviceMonitor.metricRelabelings }}
    metricRelabelings: {{- tpl (toYaml .Values.alertmanager.serviceMonitor.metricRelabelings | nindent 6) . }}
    {{- end }}
    {{- if .Values.alertmanager.serviceMonitor.relabelings }}
    relabelings: {{- toYaml .Values.alertmanager.serviceMonitor.relabelings | nindent 6 }}
    {{- end }}
  # Extra endpoints; per-endpoint values override the serviceMonitor defaults.
  {{- range .Values.alertmanager.serviceMonitor.additionalEndpoints }}
  - port: {{ .port }}
    {{- if or $.Values.alertmanager.serviceMonitor.interval .interval }}
    interval: {{ default $.Values.alertmanager.serviceMonitor.interval .interval }}
    {{- end }}
    {{- if or $.Values.alertmanager.serviceMonitor.proxyUrl .proxyUrl }}
    proxyUrl: {{ default $.Values.alertmanager.serviceMonitor.proxyUrl .proxyUrl }}
    {{- end }}
    {{- if or $.Values.alertmanager.serviceMonitor.scheme .scheme }}
    scheme: {{ default $.Values.alertmanager.serviceMonitor.scheme .scheme }}
    {{- end }}
    {{- if or $.Values.alertmanager.serviceMonitor.bearerTokenFile .bearerTokenFile }}
    bearerTokenFile: {{ default $.Values.alertmanager.serviceMonitor.bearerTokenFile .bearerTokenFile }}
    {{- end }}
    {{- if or $.Values.alertmanager.serviceMonitor.tlsConfig .tlsConfig }}
    tlsConfig: {{- default $.Values.alertmanager.serviceMonitor.tlsConfig .tlsConfig | toYaml | nindent 6 }}
    {{- end }}
    path: {{ .path }}
    {{- if or $.Values.alertmanager.serviceMonitor.metricRelabelings .metricRelabelings }}
    metricRelabelings: {{- tpl (default $.Values.alertmanager.serviceMonitor.metricRelabelings .metricRelabelings | toYaml | nindent 6) . }}
    {{- end }}
    {{- if or $.Values.alertmanager.serviceMonitor.relabelings .relabelings }}
    relabelings: {{- default $.Values.alertmanager.serviceMonitor.relabelings .relabelings | toYaml | nindent 6 }}
    {{- end }}
  {{- end }}
{{- end }}
|
||||
@ -0,0 +1,49 @@
|
||||
{{- /* One Service per Alertmanager replica (a v1 List), each pinned to a single StatefulSet pod via the pod-name selector. */ -}}
{{- if and .Values.alertmanager.enabled .Values.alertmanager.servicePerReplica.enabled }}
{{- $count := .Values.alertmanager.alertmanagerSpec.replicas | int -}}
{{- $serviceValues := .Values.alertmanager.servicePerReplica -}}
apiVersion: v1
kind: List
metadata:
  name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-serviceperreplica
  namespace: {{ template "kube-prometheus-stack-alertmanager.namespace" . }}
items:
{{- range $i, $e := until $count }}
  - apiVersion: v1
    kind: Service
    metadata:
      name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }}
      namespace: {{ template "kube-prometheus-stack.namespace" $ }}
      labels:
        app: {{ include "kube-prometheus-stack.name" $ }}-alertmanager
{{ include "kube-prometheus-stack.labels" $ | indent 8 }}
      {{- if $serviceValues.annotations }}
      annotations:
{{ toYaml $serviceValues.annotations | indent 8 }}
      {{- end }}
    spec:
      {{- if $serviceValues.clusterIP }}
      clusterIP: {{ $serviceValues.clusterIP }}
      {{- end }}
      {{- if $serviceValues.loadBalancerSourceRanges }}
      loadBalancerSourceRanges:
        {{- range $cidr := $serviceValues.loadBalancerSourceRanges }}
        - {{ $cidr }}
        {{- end }}
      {{- end }}
      {{- if ne $serviceValues.type "ClusterIP" }}
      externalTrafficPolicy: {{ $serviceValues.externalTrafficPolicy }}
      {{- end }}
      ports:
        - name: {{ $.Values.alertmanager.alertmanagerSpec.portName }}
          {{- if eq $serviceValues.type "NodePort" }}
          nodePort: {{ $serviceValues.nodePort }}
          {{- end }}
          port: {{ $serviceValues.port }}
          targetPort: {{ $serviceValues.targetPort }}
      selector:
        app.kubernetes.io/name: alertmanager
        alertmanager: {{ template "kube-prometheus-stack.alertmanager.crname" $ }}
        # Pins the Service to exactly one replica's pod.
        statefulset.kubernetes.io/pod-name: alertmanager-{{ include "kube-prometheus-stack.alertmanager.crname" $ }}-{{ $i }}
      type: "{{ $serviceValues.type }}"
{{- end }}
{{- end }}
|
||||
@ -0,0 +1,28 @@
|
||||
{{- /* Headless Service in kube-system exposing CoreDNS metrics for scraping. */ -}}
{{- if and .Values.coreDns.enabled .Values.coreDns.service.enabled .Values.kubernetesServiceMonitors.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-coredns
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-coredns
    jobLabel: coredns
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
  namespace: kube-system
spec:
  clusterIP: None
  {{- if .Values.coreDns.service.ipDualStack.enabled }}
  ipFamilies: {{ toYaml .Values.coreDns.service.ipDualStack.ipFamilies | nindent 4 }}
  ipFamilyPolicy: {{ .Values.coreDns.service.ipDualStack.ipFamilyPolicy }}
  {{- end }}
  ports:
    - name: {{ .Values.coreDns.serviceMonitor.port }}
      port: {{ .Values.coreDns.service.port }}
      protocol: TCP
      targetPort: {{ .Values.coreDns.service.targetPort }}
  selector:
    {{- if .Values.coreDns.service.selector }}
{{ toYaml .Values.coreDns.service.selector | indent 4 }}
    {{- else}}
    k8s-app: kube-dns
    {{- end}}
{{- end }}
|
||||
@ -0,0 +1,52 @@
|
||||
{{- /* ServiceMonitor scraping the CoreDNS metrics Service; placed in kube-system when namespace selectors are ignored by Prometheus. */ -}}
{{- if and .Values.coreDns.enabled .Values.coreDns.serviceMonitor.enabled .Values.kubernetesServiceMonitors.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-coredns
  {{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
  namespace: kube-system
  {{- else }}
  namespace: {{ template "kube-prometheus-stack.namespace" . }}
  {{- end }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-coredns
    {{- with .Values.coreDns.serviceMonitor.additionalLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
spec:
  jobLabel: {{ .Values.coreDns.serviceMonitor.jobLabel }}
  {{- with .Values.coreDns.serviceMonitor.targetLabels }}
  targetLabels:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- include "servicemonitor.scrapeLimits" .Values.coreDns.serviceMonitor | nindent 2 }}
  selector:
    {{- if .Values.coreDns.serviceMonitor.selector }}
    {{ tpl (toYaml .Values.coreDns.serviceMonitor.selector | nindent 4) . }}
    {{- else }}
    matchLabels:
      app: {{ template "kube-prometheus-stack.name" . }}-coredns
      release: {{ $.Release.Name | quote }}
    {{- end }}
  namespaceSelector:
    matchNames:
      - "kube-system"
  endpoints:
  - port: {{ .Values.coreDns.serviceMonitor.port }}
    {{- if .Values.coreDns.serviceMonitor.interval}}
    interval: {{ .Values.coreDns.serviceMonitor.interval }}
    {{- end }}
    {{- if .Values.coreDns.serviceMonitor.proxyUrl }}
    proxyUrl: {{ .Values.coreDns.serviceMonitor.proxyUrl}}
    {{- end }}
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    {{- if .Values.coreDns.serviceMonitor.metricRelabelings }}
    metricRelabelings:
{{ tpl (toYaml .Values.coreDns.serviceMonitor.metricRelabelings | indent 4) . }}
    {{- end }}
    {{- if .Values.coreDns.serviceMonitor.relabelings }}
    relabelings:
{{ tpl (toYaml .Values.coreDns.serviceMonitor.relabelings | indent 4) . }}
    {{- end }}
{{- end }}
|
||||
@ -0,0 +1,51 @@
|
||||
{{- /* ServiceMonitor scraping the kube-apiserver over HTTPS via the in-cluster `kubernetes` Service in the default namespace. */ -}}
{{- if and .Values.kubeApiServer.enabled .Values.kubeApiServer.serviceMonitor.enabled .Values.kubernetesServiceMonitors.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-apiserver
  {{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
  namespace: default
  {{- else }}
  namespace: {{ template "kube-prometheus-stack.namespace" . }}
  {{- end }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-apiserver
    {{- with .Values.kubeApiServer.serviceMonitor.additionalLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
spec:
  {{- include "servicemonitor.scrapeLimits" .Values.kubeApiServer.serviceMonitor | nindent 2 }}
  endpoints:
  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    {{- if .Values.kubeApiServer.serviceMonitor.interval }}
    interval: {{ .Values.kubeApiServer.serviceMonitor.interval }}
    {{- end }}
    {{- if .Values.kubeApiServer.serviceMonitor.proxyUrl }}
    proxyUrl: {{ .Values.kubeApiServer.serviceMonitor.proxyUrl }}
    {{- end }}
    port: https
    scheme: https
    {{- if .Values.kubeApiServer.serviceMonitor.metricRelabelings }}
    metricRelabelings:
{{ tpl (toYaml .Values.kubeApiServer.serviceMonitor.metricRelabelings | indent 6) . }}
    {{- end }}
    {{- if .Values.kubeApiServer.serviceMonitor.relabelings }}
    relabelings:
{{ tpl (toYaml .Values.kubeApiServer.serviceMonitor.relabelings | indent 6) . }}
    {{- end }}
    tlsConfig:
      caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      serverName: {{ .Values.kubeApiServer.tlsConfig.serverName }}
      insecureSkipVerify: {{ .Values.kubeApiServer.tlsConfig.insecureSkipVerify }}
  jobLabel: {{ .Values.kubeApiServer.serviceMonitor.jobLabel }}
  {{- with .Values.kubeApiServer.serviceMonitor.targetLabels }}
  targetLabels:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  namespaceSelector:
    matchNames:
      - default
  selector:
{{ toYaml .Values.kubeApiServer.serviceMonitor.selector | indent 4 }}
{{- end}}
|
||||
@ -0,0 +1,22 @@
|
||||
{{- /* Manual Endpoints object for kube-controller-manager, used when explicit node IPs are supplied via .Values.kubeControllerManager.endpoints. */ -}}
{{- if and .Values.kubeControllerManager.enabled .Values.kubeControllerManager.endpoints .Values.kubernetesServiceMonitors.enabled }}
apiVersion: v1
kind: Endpoints
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-kube-controller-manager
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager
    k8s-app: kube-controller-manager
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
  namespace: kube-system
subsets:
  - addresses:
      {{- range .Values.kubeControllerManager.endpoints }}
      - ip: {{ . }}
      {{- end }}
    ports:
      - name: {{ .Values.kubeControllerManager.serviceMonitor.port }}
        {{- $kubeControllerManagerDefaultInsecurePort := 10252 }}
        {{- $kubeControllerManagerDefaultSecurePort := 10257 }}
        port: {{ include "kube-prometheus-stack.kubeControllerManager.insecureScrape" (list . $kubeControllerManagerDefaultInsecurePort $kubeControllerManagerDefaultSecurePort .Values.kubeControllerManager.service.port) }}
        protocol: TCP
{{- end }}
|
||||
@ -0,0 +1,33 @@
|
||||
{{- /* Headless Service in kube-system for kube-controller-manager metrics; pod selector is omitted when explicit endpoints are configured. */ -}}
{{- if and .Values.kubeControllerManager.enabled .Values.kubeControllerManager.service.enabled .Values.kubernetesServiceMonitors.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-kube-controller-manager
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager
    jobLabel: kube-controller-manager
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
  namespace: kube-system
spec:
  clusterIP: None
  {{- if .Values.kubeControllerManager.service.ipDualStack.enabled }}
  ipFamilies: {{ toYaml .Values.kubeControllerManager.service.ipDualStack.ipFamilies | nindent 4 }}
  ipFamilyPolicy: {{ .Values.kubeControllerManager.service.ipDualStack.ipFamilyPolicy }}
  {{- end }}
  ports:
    - name: {{ .Values.kubeControllerManager.serviceMonitor.port }}
      {{- $kubeControllerManagerDefaultInsecurePort := 10252 }}
      {{- $kubeControllerManagerDefaultSecurePort := 10257 }}
      port: {{ include "kube-prometheus-stack.kubeControllerManager.insecureScrape" (list . $kubeControllerManagerDefaultInsecurePort $kubeControllerManagerDefaultSecurePort .Values.kubeControllerManager.service.port) }}
      protocol: TCP
      targetPort: {{ include "kube-prometheus-stack.kubeControllerManager.insecureScrape" (list . $kubeControllerManagerDefaultInsecurePort $kubeControllerManagerDefaultSecurePort .Values.kubeControllerManager.service.targetPort) }}
  {{- if .Values.kubeControllerManager.endpoints }}{{- else }}
  selector:
    {{- if .Values.kubeControllerManager.service.selector }}
{{ toYaml .Values.kubeControllerManager.service.selector | indent 4 }}
    {{- else}}
    component: kube-controller-manager
    {{- end}}
  {{- end }}
  type: ClusterIP
{{- end }}
|
||||
@ -0,0 +1,63 @@
|
||||
{{- /* ServiceMonitor for kube-controller-manager; HTTPS/insecure scrape mode is resolved by the chart's insecureScrape helper. */ -}}
{{- if and .Values.kubeControllerManager.enabled .Values.kubeControllerManager.serviceMonitor.enabled .Values.kubernetesServiceMonitors.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-kube-controller-manager
  {{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
  namespace: kube-system
  {{- else }}
  namespace: {{ template "kube-prometheus-stack.namespace" . }}
  {{- end }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager
    {{- with .Values.kubeControllerManager.serviceMonitor.additionalLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
spec:
  jobLabel: {{ .Values.kubeControllerManager.serviceMonitor.jobLabel }}
  {{- with .Values.kubeControllerManager.serviceMonitor.targetLabels }}
  targetLabels:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- include "servicemonitor.scrapeLimits" .Values.kubeControllerManager.serviceMonitor | nindent 2 }}
  selector:
    {{- if .Values.kubeControllerManager.serviceMonitor.selector }}
    {{ tpl (toYaml .Values.kubeControllerManager.serviceMonitor.selector | nindent 4) . }}
    {{- else }}
    matchLabels:
      app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager
      release: {{ $.Release.Name | quote }}
    {{- end }}
  namespaceSelector:
    matchNames:
      - "kube-system"
  endpoints:
  - port: {{ .Values.kubeControllerManager.serviceMonitor.port }}
    {{- if .Values.kubeControllerManager.serviceMonitor.interval }}
    interval: {{ .Values.kubeControllerManager.serviceMonitor.interval }}
    {{- end }}
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    {{- if .Values.kubeControllerManager.serviceMonitor.proxyUrl }}
    proxyUrl: {{ .Values.kubeControllerManager.serviceMonitor.proxyUrl}}
    {{- end }}
    {{- if eq (include "kube-prometheus-stack.kubeControllerManager.insecureScrape" (list . false true .Values.kubeControllerManager.serviceMonitor.https )) "true" }}
    scheme: https
    tlsConfig:
      caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      {{- if eq (include "kube-prometheus-stack.kubeControllerManager.insecureScrape" (list . nil true .Values.kubeControllerManager.serviceMonitor.insecureSkipVerify)) "true" }}
      insecureSkipVerify: true
      {{- end }}
      {{- if .Values.kubeControllerManager.serviceMonitor.serverName }}
      serverName: {{ .Values.kubeControllerManager.serviceMonitor.serverName }}
      {{- end }}
    {{- end }}
    {{- if .Values.kubeControllerManager.serviceMonitor.metricRelabelings }}
    metricRelabelings:
{{ tpl (toYaml .Values.kubeControllerManager.serviceMonitor.metricRelabelings | indent 4) . }}
    {{- end }}
    {{- if .Values.kubeControllerManager.serviceMonitor.relabelings }}
    relabelings:
{{ tpl (toYaml .Values.kubeControllerManager.serviceMonitor.relabelings | indent 4) . }}
    {{- end }}
{{- end }}
|
||||
@ -0,0 +1,32 @@
|
||||
{{- /* Headless Service in kube-system exposing the legacy kube-dns dnsmasq and skydns metrics ports. */ -}}
{{- if and .Values.kubeDns.enabled .Values.kubernetesServiceMonitors.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-kube-dns
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-kube-dns
    jobLabel: kube-dns
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
  namespace: kube-system
spec:
  clusterIP: None
  {{- if .Values.kubeDns.service.ipDualStack.enabled }}
  ipFamilies: {{ toYaml .Values.kubeDns.service.ipDualStack.ipFamilies | nindent 4 }}
  ipFamilyPolicy: {{ .Values.kubeDns.service.ipDualStack.ipFamilyPolicy }}
  {{- end }}
  ports:
    - name: http-metrics-dnsmasq
      port: {{ .Values.kubeDns.service.dnsmasq.port }}
      protocol: TCP
      targetPort: {{ .Values.kubeDns.service.dnsmasq.targetPort }}
    - name: http-metrics-skydns
      port: {{ .Values.kubeDns.service.skydns.port }}
      protocol: TCP
      targetPort: {{ .Values.kubeDns.service.skydns.targetPort }}
  selector:
    {{- if .Values.kubeDns.service.selector }}
{{ toYaml .Values.kubeDns.service.selector | indent 4 }}
    {{- else}}
    k8s-app: kube-dns
    {{- end}}
{{- end }}
|
||||
@ -0,0 +1,65 @@
|
||||
{{- /* ServiceMonitor for legacy kube-dns: one endpoint per metrics port (dnsmasq and skydns), each with its own relabeling values. */ -}}
{{- if and .Values.kubeDns.enabled .Values.kubernetesServiceMonitors.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "kube-prometheus-stack.fullname" . }}-kube-dns
  {{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
  namespace: kube-system
  {{- else }}
  namespace: {{ template "kube-prometheus-stack.namespace" . }}
  {{- end }}
  labels:
    app: {{ template "kube-prometheus-stack.name" . }}-kube-dns
    {{- with .Values.kubeDns.serviceMonitor.additionalLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
spec:
  jobLabel: {{ .Values.kubeDns.serviceMonitor.jobLabel }}
  {{- with .Values.kubeDns.serviceMonitor.targetLabels }}
  targetLabels:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- include "servicemonitor.scrapeLimits" .Values.kubeDns.serviceMonitor | nindent 2 }}
  selector:
    {{- if .Values.kubeDns.serviceMonitor.selector }}
    {{ tpl (toYaml .Values.kubeDns.serviceMonitor.selector | nindent 4) . }}
    {{- else }}
    matchLabels:
      app: {{ template "kube-prometheus-stack.name" . }}-kube-dns
      release: {{ $.Release.Name | quote }}
    {{- end }}
  namespaceSelector:
    matchNames:
      - "kube-system"
  endpoints:
  - port: http-metrics-dnsmasq
    {{- if .Values.kubeDns.serviceMonitor.interval }}
    interval: {{ .Values.kubeDns.serviceMonitor.interval }}
    {{- end }}
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    {{- if .Values.kubeDns.serviceMonitor.proxyUrl }}
    proxyUrl: {{ .Values.kubeDns.serviceMonitor.proxyUrl}}
    {{- end }}
    {{- if .Values.kubeDns.serviceMonitor.dnsmasqMetricRelabelings }}
    metricRelabelings:
{{ tpl (toYaml .Values.kubeDns.serviceMonitor.dnsmasqMetricRelabelings | indent 4) . }}
    {{- end }}
    {{- if .Values.kubeDns.serviceMonitor.dnsmasqRelabelings }}
    relabelings:
{{ toYaml .Values.kubeDns.serviceMonitor.dnsmasqRelabelings | indent 4 }}
    {{- end }}
  - port: http-metrics-skydns
    {{- if .Values.kubeDns.serviceMonitor.interval }}
    interval: {{ .Values.kubeDns.serviceMonitor.interval }}
    {{- end }}
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    {{- if .Values.kubeDns.serviceMonitor.metricRelabelings }}
    metricRelabelings:
{{ tpl (toYaml .Values.kubeDns.serviceMonitor.metricRelabelings | indent 4) . }}
    {{- end }}
    {{- if .Values.kubeDns.serviceMonitor.relabelings }}
    relabelings:
{{ tpl (toYaml .Values.kubeDns.serviceMonitor.relabelings | indent 4) . }}
    {{- end }}
{{- end }}
|
||||
@ -0,0 +1,20 @@
|
||||
{{- if and .Values.kubeEtcd.enabled .Values.kubeEtcd.endpoints .Values.kubernetesServiceMonitors.enabled }}
|
||||
apiVersion: v1
|
||||
kind: Endpoints
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.fullname" . }}-kube-etcd
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-kube-etcd
|
||||
k8s-app: etcd-server
|
||||
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
|
||||
namespace: kube-system
|
||||
subsets:
|
||||
- addresses:
|
||||
{{- range .Values.kubeEtcd.endpoints }}
|
||||
- ip: {{ . }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: {{ .Values.kubeEtcd.serviceMonitor.port }}
|
||||
port: {{ .Values.kubeEtcd.service.port }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
@ -0,0 +1,31 @@
|
||||
{{- if and .Values.kubeEtcd.enabled .Values.kubeEtcd.service.enabled .Values.kubernetesServiceMonitors.enabled }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.fullname" . }}-kube-etcd
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-kube-etcd
|
||||
jobLabel: kube-etcd
|
||||
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
|
||||
namespace: kube-system
|
||||
spec:
|
||||
clusterIP: None
|
||||
{{- if .Values.kubeEtcd.service.ipDualStack.enabled }}
|
||||
ipFamilies: {{ toYaml .Values.kubeEtcd.service.ipDualStack.ipFamilies | nindent 4 }}
|
||||
ipFamilyPolicy: {{ .Values.kubeEtcd.service.ipDualStack.ipFamilyPolicy }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: {{ .Values.kubeEtcd.serviceMonitor.port }}
|
||||
port: {{ .Values.kubeEtcd.service.port }}
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.kubeEtcd.service.targetPort }}
|
||||
{{- if .Values.kubeEtcd.endpoints }}{{- else }}
|
||||
selector:
|
||||
{{- if .Values.kubeEtcd.service.selector }}
|
||||
{{ toYaml .Values.kubeEtcd.service.selector | indent 4 }}
|
||||
{{- else}}
|
||||
component: etcd
|
||||
{{- end}}
|
||||
{{- end }}
|
||||
type: ClusterIP
|
||||
{{- end -}}
|
||||
@ -0,0 +1,69 @@
|
||||
{{- if and .Values.kubeEtcd.enabled .Values.kubeEtcd.serviceMonitor.enabled .Values.kubernetesServiceMonitors.enabled }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.fullname" . }}-kube-etcd
|
||||
{{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
|
||||
namespace: kube-system
|
||||
{{- else }}
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-kube-etcd
|
||||
{{- with .Values.kubeEtcd.serviceMonitor.additionalLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
|
||||
spec:
|
||||
jobLabel: {{ .Values.kubeEtcd.serviceMonitor.jobLabel }}
|
||||
{{- with .Values.kubeEtcd.serviceMonitor.targetLabels }}
|
||||
targetLabels:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- include "servicemonitor.scrapeLimits" .Values.kubeEtcd.serviceMonitor | nindent 2 }}
|
||||
selector:
|
||||
{{- if .Values.kubeEtcd.serviceMonitor.selector }}
|
||||
{{ tpl (toYaml .Values.kubeEtcd.serviceMonitor.selector | nindent 4) . }}
|
||||
{{- else }}
|
||||
matchLabels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-kube-etcd
|
||||
release: {{ $.Release.Name | quote }}
|
||||
{{- end }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- "kube-system"
|
||||
endpoints:
|
||||
- port: {{ .Values.kubeEtcd.serviceMonitor.port }}
|
||||
{{- if .Values.kubeEtcd.serviceMonitor.interval }}
|
||||
interval: {{ .Values.kubeEtcd.serviceMonitor.interval }}
|
||||
{{- end }}
|
||||
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
{{- if .Values.kubeEtcd.serviceMonitor.proxyUrl }}
|
||||
proxyUrl: {{ .Values.kubeEtcd.serviceMonitor.proxyUrl}}
|
||||
{{- end }}
|
||||
{{- if eq .Values.kubeEtcd.serviceMonitor.scheme "https" }}
|
||||
scheme: https
|
||||
tlsConfig:
|
||||
{{- if .Values.kubeEtcd.serviceMonitor.serverName }}
|
||||
serverName: {{ .Values.kubeEtcd.serviceMonitor.serverName }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubeEtcd.serviceMonitor.caFile }}
|
||||
caFile: {{ .Values.kubeEtcd.serviceMonitor.caFile }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubeEtcd.serviceMonitor.certFile }}
|
||||
certFile: {{ .Values.kubeEtcd.serviceMonitor.certFile }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubeEtcd.serviceMonitor.keyFile }}
|
||||
keyFile: {{ .Values.kubeEtcd.serviceMonitor.keyFile }}
|
||||
{{- end}}
|
||||
insecureSkipVerify: {{ .Values.kubeEtcd.serviceMonitor.insecureSkipVerify }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubeEtcd.serviceMonitor.metricRelabelings }}
|
||||
metricRelabelings:
|
||||
{{ tpl (toYaml .Values.kubeEtcd.serviceMonitor.metricRelabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubeEtcd.serviceMonitor.relabelings }}
|
||||
relabelings:
|
||||
{{ tpl (toYaml .Values.kubeEtcd.serviceMonitor.relabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@ -0,0 +1,20 @@
|
||||
{{- if and .Values.kubeProxy.enabled .Values.kubeProxy.endpoints .Values.kubernetesServiceMonitors.enabled }}
|
||||
apiVersion: v1
|
||||
kind: Endpoints
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.fullname" . }}-kube-proxy
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-kube-proxy
|
||||
k8s-app: kube-proxy
|
||||
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
|
||||
namespace: kube-system
|
||||
subsets:
|
||||
- addresses:
|
||||
{{- range .Values.kubeProxy.endpoints }}
|
||||
- ip: {{ . }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: {{ .Values.kubeProxy.serviceMonitor.port }}
|
||||
port: {{ .Values.kubeProxy.service.port }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
@ -0,0 +1,31 @@
|
||||
{{- if and .Values.kubeProxy.enabled .Values.kubeProxy.service.enabled .Values.kubernetesServiceMonitors.enabled }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.fullname" . }}-kube-proxy
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-kube-proxy
|
||||
jobLabel: kube-proxy
|
||||
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
|
||||
namespace: kube-system
|
||||
spec:
|
||||
clusterIP: None
|
||||
{{- if .Values.kubeProxy.service.ipDualStack.enabled }}
|
||||
ipFamilies: {{ toYaml .Values.kubeProxy.service.ipDualStack.ipFamilies | nindent 4 }}
|
||||
ipFamilyPolicy: {{ .Values.kubeProxy.service.ipDualStack.ipFamilyPolicy }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: {{ .Values.kubeProxy.serviceMonitor.port }}
|
||||
port: {{ .Values.kubeProxy.service.port }}
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.kubeProxy.service.targetPort }}
|
||||
{{- if .Values.kubeProxy.endpoints }}{{- else }}
|
||||
selector:
|
||||
{{- if .Values.kubeProxy.service.selector }}
|
||||
{{ toYaml .Values.kubeProxy.service.selector | indent 4 }}
|
||||
{{- else}}
|
||||
k8s-app: kube-proxy
|
||||
{{- end}}
|
||||
{{- end }}
|
||||
type: ClusterIP
|
||||
{{- end -}}
|
||||
@ -0,0 +1,57 @@
|
||||
{{- if and .Values.kubeProxy.enabled .Values.kubeProxy.serviceMonitor.enabled .Values.kubernetesServiceMonitors.enabled }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.fullname" . }}-kube-proxy
|
||||
{{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
|
||||
namespace: kube-system
|
||||
{{- else }}
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-kube-proxy
|
||||
{{- with .Values.kubeProxy.serviceMonitor.additionalLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
|
||||
spec:
|
||||
jobLabel: {{ .Values.kubeProxy.serviceMonitor.jobLabel }}
|
||||
{{- with .Values.kubeProxy.serviceMonitor.targetLabels }}
|
||||
targetLabels:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- include "servicemonitor.scrapeLimits" .Values.kubeProxy.serviceMonitor | nindent 2 }}
|
||||
selector:
|
||||
{{- if .Values.kubeProxy.serviceMonitor.selector }}
|
||||
{{ tpl (toYaml .Values.kubeProxy.serviceMonitor.selector | nindent 4) . }}
|
||||
{{- else }}
|
||||
matchLabels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-kube-proxy
|
||||
release: {{ $.Release.Name | quote }}
|
||||
{{- end }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- "kube-system"
|
||||
endpoints:
|
||||
- port: {{ .Values.kubeProxy.serviceMonitor.port }}
|
||||
{{- if .Values.kubeProxy.serviceMonitor.interval }}
|
||||
interval: {{ .Values.kubeProxy.serviceMonitor.interval }}
|
||||
{{- end }}
|
||||
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
{{- if .Values.kubeProxy.serviceMonitor.proxyUrl }}
|
||||
proxyUrl: {{ .Values.kubeProxy.serviceMonitor.proxyUrl}}
|
||||
{{- end }}
|
||||
{{- if .Values.kubeProxy.serviceMonitor.https }}
|
||||
scheme: https
|
||||
tlsConfig:
|
||||
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
{{- end}}
|
||||
{{- if .Values.kubeProxy.serviceMonitor.metricRelabelings }}
|
||||
metricRelabelings:
|
||||
{{ tpl (toYaml .Values.kubeProxy.serviceMonitor.metricRelabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubeProxy.serviceMonitor.relabelings }}
|
||||
relabelings:
|
||||
{{ tpl (toYaml .Values.kubeProxy.serviceMonitor.relabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@ -0,0 +1,22 @@
|
||||
{{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.endpoints .Values.kubernetesServiceMonitors.enabled }}
|
||||
apiVersion: v1
|
||||
kind: Endpoints
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.fullname" . }}-kube-scheduler
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-kube-scheduler
|
||||
k8s-app: kube-scheduler
|
||||
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
|
||||
namespace: kube-system
|
||||
subsets:
|
||||
- addresses:
|
||||
{{- range .Values.kubeScheduler.endpoints }}
|
||||
- ip: {{ . }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: {{ .Values.kubeScheduler.serviceMonitor.port }}
|
||||
{{- $kubeSchedulerDefaultInsecurePort := 10251 }}
|
||||
{{- $kubeSchedulerDefaultSecurePort := 10259 }}
|
||||
port: {{ include "kube-prometheus-stack.kubeScheduler.insecureScrape" (list . $kubeSchedulerDefaultInsecurePort $kubeSchedulerDefaultSecurePort .Values.kubeScheduler.service.port) }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
@ -0,0 +1,33 @@
|
||||
{{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.service.enabled .Values.kubernetesServiceMonitors.enabled }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.fullname" . }}-kube-scheduler
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-kube-scheduler
|
||||
jobLabel: kube-scheduler
|
||||
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
|
||||
namespace: kube-system
|
||||
spec:
|
||||
clusterIP: None
|
||||
{{- if .Values.kubeScheduler.service.ipDualStack.enabled }}
|
||||
ipFamilies: {{ toYaml .Values.kubeScheduler.service.ipDualStack.ipFamilies | nindent 4 }}
|
||||
ipFamilyPolicy: {{ .Values.kubeScheduler.service.ipDualStack.ipFamilyPolicy }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: {{ .Values.kubeScheduler.serviceMonitor.port }}
|
||||
{{- $kubeSchedulerDefaultInsecurePort := 10251 }}
|
||||
{{- $kubeSchedulerDefaultSecurePort := 10259 }}
|
||||
port: {{ include "kube-prometheus-stack.kubeScheduler.insecureScrape" (list . $kubeSchedulerDefaultInsecurePort $kubeSchedulerDefaultSecurePort .Values.kubeScheduler.service.port) }}
|
||||
protocol: TCP
|
||||
targetPort: {{ include "kube-prometheus-stack.kubeScheduler.insecureScrape" (list . $kubeSchedulerDefaultInsecurePort $kubeSchedulerDefaultSecurePort .Values.kubeScheduler.service.targetPort) }}
|
||||
{{- if .Values.kubeScheduler.endpoints }}{{- else }}
|
||||
selector:
|
||||
{{- if .Values.kubeScheduler.service.selector }}
|
||||
{{ toYaml .Values.kubeScheduler.service.selector | indent 4 }}
|
||||
{{- else}}
|
||||
component: kube-scheduler
|
||||
{{- end}}
|
||||
{{- end }}
|
||||
type: ClusterIP
|
||||
{{- end -}}
|
||||
@ -0,0 +1,63 @@
|
||||
{{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.serviceMonitor.enabled .Values.kubernetesServiceMonitors.enabled }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.fullname" . }}-kube-scheduler
|
||||
{{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
|
||||
namespace: kube-system
|
||||
{{- else }}
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-kube-scheduler
|
||||
{{- with .Values.kubeScheduler.serviceMonitor.additionalLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
|
||||
spec:
|
||||
jobLabel: {{ .Values.kubeScheduler.serviceMonitor.jobLabel }}
|
||||
{{- with .Values.kubeScheduler.serviceMonitor.targetLabels }}
|
||||
targetLabels:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- include "servicemonitor.scrapeLimits" .Values.kubeScheduler.serviceMonitor | nindent 2 }}
|
||||
selector:
|
||||
{{- if .Values.kubeScheduler.serviceMonitor.selector }}
|
||||
{{ tpl (toYaml .Values.kubeScheduler.serviceMonitor.selector | nindent 4) . }}
|
||||
{{- else }}
|
||||
matchLabels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-kube-scheduler
|
||||
release: {{ $.Release.Name | quote }}
|
||||
{{- end }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- "kube-system"
|
||||
endpoints:
|
||||
- port: {{ .Values.kubeScheduler.serviceMonitor.port }}
|
||||
{{- if .Values.kubeScheduler.serviceMonitor.interval }}
|
||||
interval: {{ .Values.kubeScheduler.serviceMonitor.interval }}
|
||||
{{- end }}
|
||||
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
{{- if .Values.kubeScheduler.serviceMonitor.proxyUrl }}
|
||||
proxyUrl: {{ .Values.kubeScheduler.serviceMonitor.proxyUrl}}
|
||||
{{- end }}
|
||||
{{- if eq (include "kube-prometheus-stack.kubeScheduler.insecureScrape" (list . false true .Values.kubeScheduler.serviceMonitor.https )) "true" }}
|
||||
scheme: https
|
||||
tlsConfig:
|
||||
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
{{- if eq (include "kube-prometheus-stack.kubeScheduler.insecureScrape" (list . nil true .Values.kubeScheduler.serviceMonitor.insecureSkipVerify)) "true" }}
|
||||
insecureSkipVerify: true
|
||||
{{- end }}
|
||||
{{- if .Values.kubeScheduler.serviceMonitor.serverName }}
|
||||
serverName: {{ .Values.kubeScheduler.serviceMonitor.serverName }}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if .Values.kubeScheduler.serviceMonitor.metricRelabelings }}
|
||||
metricRelabelings:
|
||||
{{ tpl (toYaml .Values.kubeScheduler.serviceMonitor.metricRelabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubeScheduler.serviceMonitor.relabelings }}
|
||||
relabelings:
|
||||
{{ tpl (toYaml .Values.kubeScheduler.serviceMonitor.relabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@ -0,0 +1,149 @@
|
||||
{{- if and .Values.kubelet.enabled .Values.kubelet.serviceMonitor.enabled .Values.kubernetesServiceMonitors.enabled }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.fullname" . }}-kubelet
|
||||
{{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
|
||||
namespace: {{ .Values.kubelet.namespace }}
|
||||
{{- else }}
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-kubelet
|
||||
{{- with .Values.kubelet.serviceMonitor.additionalLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- include "kube-prometheus-stack.labels" . | indent 4 }}
|
||||
spec:
|
||||
{{- include "servicemonitor.scrapeLimits" .Values.kubelet.serviceMonitor | nindent 2 }}
|
||||
{{- with .Values.kubelet.serviceMonitor.attachMetadata }}
|
||||
attachMetadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
jobLabel: k8s-app
|
||||
{{- with .Values.kubelet.serviceMonitor.targetLabels }}
|
||||
targetLabels:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Values.kubelet.namespace }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: kubelet
|
||||
k8s-app: kubelet
|
||||
endpoints:
|
||||
{{- if .Values.kubelet.serviceMonitor.kubelet }}
|
||||
- port: {{ template "kube-prometheus-stack.kubelet.scheme" . }}-metrics
|
||||
scheme: {{ template "kube-prometheus-stack.kubelet.scheme" . }}
|
||||
{{- if .Values.kubelet.serviceMonitor.interval }}
|
||||
interval: {{ .Values.kubelet.serviceMonitor.interval }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.proxyUrl }}
|
||||
proxyUrl: {{ .Values.kubelet.serviceMonitor.proxyUrl }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.scrapeTimeout }}
|
||||
scrapeTimeout: {{ .Values.kubelet.serviceMonitor.scrapeTimeout }}
|
||||
{{- end }}
|
||||
{{- include "kube-prometheus-stack.kubelet.authConfig" . | indent 4 }}
|
||||
honorLabels: {{ .Values.kubelet.serviceMonitor.honorLabels }}
|
||||
honorTimestamps: {{ .Values.kubelet.serviceMonitor.honorTimestamps }}
|
||||
{{- if .Values.kubelet.serviceMonitor.metricRelabelings }}
|
||||
metricRelabelings:
|
||||
{{ tpl (toYaml .Values.kubelet.serviceMonitor.metricRelabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.relabelings }}
|
||||
relabelings:
|
||||
{{ tpl (toYaml .Values.kubelet.serviceMonitor.relabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.cAdvisor }}
|
||||
- port: {{ template "kube-prometheus-stack.kubelet.scheme" . }}-metrics
|
||||
scheme: {{ template "kube-prometheus-stack.kubelet.scheme" . }}
|
||||
path: /metrics/cadvisor
|
||||
{{- if .Values.kubelet.serviceMonitor.interval }}
|
||||
interval: {{ .Values.kubelet.serviceMonitor.interval }}
|
||||
{{- else }}
|
||||
interval: {{ .Values.kubelet.serviceMonitor.cAdvisorInterval }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.proxyUrl }}
|
||||
proxyUrl: {{ .Values.kubelet.serviceMonitor.proxyUrl }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.scrapeTimeout }}
|
||||
scrapeTimeout: {{ .Values.kubelet.serviceMonitor.scrapeTimeout }}
|
||||
{{- end }}
|
||||
honorLabels: {{ .Values.kubelet.serviceMonitor.honorLabels }}
|
||||
{{- if .Values.kubelet.serviceMonitor.trackTimestampsStaleness }}
|
||||
honorTimestamps: true
|
||||
{{- else }}
|
||||
honorTimestamps: {{ .Values.kubelet.serviceMonitor.honorTimestamps }}
|
||||
{{- end }}
|
||||
trackTimestampsStaleness: {{ .Values.kubelet.serviceMonitor.trackTimestampsStaleness }}
|
||||
{{- include "kube-prometheus-stack.kubelet.authConfig" . | indent 4 }}
|
||||
{{- if .Values.kubelet.serviceMonitor.cAdvisorMetricRelabelings }}
|
||||
metricRelabelings:
|
||||
{{ tpl (toYaml .Values.kubelet.serviceMonitor.cAdvisorMetricRelabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.cAdvisorRelabelings }}
|
||||
relabelings:
|
||||
{{ tpl (toYaml .Values.kubelet.serviceMonitor.cAdvisorRelabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.probes }}
|
||||
- port: {{ template "kube-prometheus-stack.kubelet.scheme" . }}-metrics
|
||||
scheme: {{ template "kube-prometheus-stack.kubelet.scheme" . }}
|
||||
path: /metrics/probes
|
||||
{{- if .Values.kubelet.serviceMonitor.interval }}
|
||||
interval: {{ .Values.kubelet.serviceMonitor.interval }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.proxyUrl }}
|
||||
proxyUrl: {{ .Values.kubelet.serviceMonitor.proxyUrl }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.scrapeTimeout }}
|
||||
scrapeTimeout: {{ .Values.kubelet.serviceMonitor.scrapeTimeout }}
|
||||
{{- end }}
|
||||
honorLabels: {{ .Values.kubelet.serviceMonitor.honorLabels }}
|
||||
honorTimestamps: {{ .Values.kubelet.serviceMonitor.honorTimestamps }}
|
||||
{{- include "kube-prometheus-stack.kubelet.authConfig" . | indent 4 }}
|
||||
{{- if .Values.kubelet.serviceMonitor.probesMetricRelabelings }}
|
||||
metricRelabelings:
|
||||
{{ tpl (toYaml .Values.kubelet.serviceMonitor.probesMetricRelabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.probesRelabelings }}
|
||||
relabelings:
|
||||
{{ tpl (toYaml .Values.kubelet.serviceMonitor.probesRelabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.resource }}
|
||||
- port: {{ template "kube-prometheus-stack.kubelet.scheme" . }}-metrics
|
||||
scheme: {{ template "kube-prometheus-stack.kubelet.scheme" . }}
|
||||
path: {{ .Values.kubelet.serviceMonitor.resourcePath }}
|
||||
{{- if .Values.kubelet.serviceMonitor.interval }}
|
||||
interval: {{ .Values.kubelet.serviceMonitor.interval }}
|
||||
{{- else }}
|
||||
interval: {{ .Values.kubelet.serviceMonitor.resourceInterval }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.proxyUrl }}
|
||||
proxyUrl: {{ .Values.kubelet.serviceMonitor.proxyUrl }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.scrapeTimeout }}
|
||||
scrapeTimeout: {{ .Values.kubelet.serviceMonitor.scrapeTimeout }}
|
||||
{{- end }}
|
||||
honorLabels: {{ .Values.kubelet.serviceMonitor.honorLabels }}
|
||||
{{- if .Values.kubelet.serviceMonitor.trackTimestampsStaleness }}
|
||||
honorTimestamps: true
|
||||
{{- else }}
|
||||
honorTimestamps: {{ .Values.kubelet.serviceMonitor.honorTimestamps }}
|
||||
{{- end }}
|
||||
trackTimestampsStaleness: {{ .Values.kubelet.serviceMonitor.trackTimestampsStaleness }}
|
||||
{{- include "kube-prometheus-stack.kubelet.authConfig" . | indent 4 }}
|
||||
{{- if .Values.kubelet.serviceMonitor.resourceMetricRelabelings }}
|
||||
metricRelabelings:
|
||||
{{ tpl (toYaml .Values.kubelet.serviceMonitor.resourceMetricRelabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- if .Values.kubelet.serviceMonitor.resourceRelabelings }}
|
||||
relabelings:
|
||||
{{ tpl (toYaml .Values.kubelet.serviceMonitor.resourceRelabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
15
charts/kube-prometheus-stack/templates/extra-objects.yaml
Normal file
15
charts/kube-prometheus-stack/templates/extra-objects.yaml
Normal file
@ -0,0 +1,15 @@
|
||||
{{- /* Normalize extraObjects to a list, easier to loop over */ -}}
|
||||
{{- $extraObjects := .Values.extraManifests | default (list) -}}
|
||||
|
||||
{{- if kindIs "map" $extraObjects -}}
|
||||
{{- $extraObjects = values $extraObjects -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- range $extraObjects }}
|
||||
---
|
||||
{{- if kindIs "map" . }}
|
||||
{{- tpl (toYaml .) $ | nindent 0 }}
|
||||
{{- else if kindIs "string" . }}
|
||||
{{- tpl . $ | nindent 0 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@ -0,0 +1,86 @@
|
||||
{{- if or (and .Values.grafana.enabled .Values.grafana.sidecar.datasources.enabled) .Values.grafana.forceDeployDatasources }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.fullname" . }}-grafana-datasource
|
||||
namespace: {{ template "kube-prometheus-stack-grafana.namespace" . }}
|
||||
{{- if .Values.grafana.sidecar.datasources.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.grafana.sidecar.datasources.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{ $.Values.grafana.sidecar.datasources.label }}: {{ $.Values.grafana.sidecar.datasources.labelValue | quote }}
|
||||
app: {{ template "kube-prometheus-stack.name" $ }}-grafana
|
||||
{{ include "kube-prometheus-stack.labels" $ | indent 4 }}
|
||||
data:
|
||||
datasource.yaml: |-
|
||||
apiVersion: 1
|
||||
{{- if .Values.grafana.deleteDatasources }}
|
||||
deleteDatasources:
|
||||
{{ tpl (toYaml .Values.grafana.deleteDatasources | indent 6) . }}
|
||||
{{- end }}
|
||||
{{- if .Values.grafana.prune }}
|
||||
prune: {{ .Values.grafana.prune }}
|
||||
{{- end }}
|
||||
datasources:
|
||||
{{- $scrapeInterval := .Values.grafana.sidecar.datasources.defaultDatasourceScrapeInterval | default .Values.prometheus.prometheusSpec.scrapeInterval | default "30s" }}
|
||||
{{- if .Values.grafana.sidecar.datasources.defaultDatasourceEnabled }}
|
||||
- name: "{{ .Values.grafana.sidecar.datasources.name }}"
|
||||
type: prometheus
|
||||
uid: {{ .Values.grafana.sidecar.datasources.uid }}
|
||||
{{- if .Values.grafana.sidecar.datasources.url }}
|
||||
url: {{ .Values.grafana.sidecar.datasources.url }}
|
||||
{{- else }}
|
||||
url: http://{{ template "kube-prometheus-stack.fullname" . }}-prometheus.{{ template "kube-prometheus-stack.namespace" . }}:{{ .Values.prometheus.service.port }}/{{ trimPrefix "/" .Values.prometheus.prometheusSpec.routePrefix }}
|
||||
{{- end }}
|
||||
access: proxy
|
||||
isDefault: {{ .Values.grafana.sidecar.datasources.isDefaultDatasource }}
|
||||
jsonData:
|
||||
httpMethod: {{ .Values.grafana.sidecar.datasources.httpMethod }}
|
||||
timeInterval: {{ $scrapeInterval }}
|
||||
{{- if .Values.grafana.sidecar.datasources.timeout }}
|
||||
timeout: {{ .Values.grafana.sidecar.datasources.timeout }}
|
||||
{{- end }}
|
||||
{{- if .Values.grafana.sidecar.datasources.exemplarTraceIdDestinations }}
|
||||
exemplarTraceIdDestinations:
|
||||
- datasourceUid: {{ .Values.grafana.sidecar.datasources.exemplarTraceIdDestinations.datasourceUid }}
|
||||
name: {{ .Values.grafana.sidecar.datasources.exemplarTraceIdDestinations.traceIdLabelName }}
|
||||
urlDisplayLabel: {{ .Values.grafana.sidecar.datasources.exemplarTraceIdDestinations.urlDisplayLabel }}
|
||||
{{- end }}
|
||||
{{- if .Values.grafana.sidecar.datasources.createPrometheusReplicasDatasources }}
|
||||
{{- range until (int .Values.prometheus.prometheusSpec.replicas) }}
|
||||
- name: "{{ $.Values.grafana.sidecar.datasources.name }}-{{ . }}"
|
||||
type: prometheus
|
||||
uid: {{ $.Values.grafana.sidecar.datasources.uid }}-replica-{{ . }}
|
||||
url: http://prometheus-{{ template "kube-prometheus-stack.prometheus.crname" $ }}-{{ . }}.{{ $.Values.grafana.sidecar.datasources.prometheusServiceName}}:9090/{{ trimPrefix "/" $.Values.prometheus.prometheusSpec.routePrefix }}
|
||||
access: proxy
|
||||
isDefault: false
|
||||
jsonData:
|
||||
timeInterval: {{ $scrapeInterval }}
|
||||
{{- if $.Values.grafana.sidecar.datasources.exemplarTraceIdDestinations }}
|
||||
exemplarTraceIdDestinations:
|
||||
- datasourceUid: {{ $.Values.grafana.sidecar.datasources.exemplarTraceIdDestinations.datasourceUid }}
|
||||
name: {{ $.Values.grafana.sidecar.datasources.exemplarTraceIdDestinations.traceIdLabelName }}
|
||||
urlDisplayLabel: {{ .Values.grafana.sidecar.datasources.exemplarTraceIdDestinations.urlDisplayLabel }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.grafana.sidecar.datasources.alertmanager.enabled }}
|
||||
- name: "{{ .Values.grafana.sidecar.datasources.alertmanager.name }}"
|
||||
type: alertmanager
|
||||
uid: {{ .Values.grafana.sidecar.datasources.alertmanager.uid }}
|
||||
{{- if .Values.grafana.sidecar.datasources.alertmanager.url }}
|
||||
url: {{ .Values.grafana.sidecar.datasources.alertmanager.url }}
|
||||
{{- else }}
|
||||
url: http://{{ template "kube-prometheus-stack.fullname" . }}-alertmanager.{{ template "kube-prometheus-stack.namespace" . }}:{{ .Values.alertmanager.service.port }}/{{ trimPrefix "/" .Values.alertmanager.alertmanagerSpec.routePrefix }}
|
||||
{{- end }}
|
||||
access: proxy
|
||||
jsonData:
|
||||
handleGrafanaManagedAlerts: {{ .Values.grafana.sidecar.datasources.alertmanager.handleGrafanaManagedAlerts }}
|
||||
implementation: {{ .Values.grafana.sidecar.datasources.alertmanager.implementation }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.grafana.additionalDataSources }}
|
||||
{{ tpl (toYaml .Values.grafana.additionalDataSources | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user