diff --git a/deploy/dev/charts/prometheus/.helmignore b/deploy/dev/charts/prometheus/.helmignore new file mode 100644 index 0000000..825c007 --- /dev/null +++ b/deploy/dev/charts/prometheus/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj + +OWNERS diff --git a/deploy/dev/charts/prometheus/Chart.lock b/deploy/dev/charts/prometheus/Chart.lock new file mode 100644 index 0000000..5de4799 --- /dev/null +++ b/deploy/dev/charts/prometheus/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 4.7.0 +digest: sha256:1ae96f01358013a1ef2df0a83c215f1c8390f10fcb64f6d014d1cd00eb7d7eeb +generated: "2022-04-20T09:25:53.255019562Z" diff --git a/deploy/dev/charts/prometheus/Chart.yaml b/deploy/dev/charts/prometheus/Chart.yaml new file mode 100644 index 0000000..315edf4 --- /dev/null +++ b/deploy/dev/charts/prometheus/Chart.yaml @@ -0,0 +1,28 @@ +apiVersion: v2 +appVersion: 2.34.0 +dependencies: +- condition: kubeStateMetrics.enabled + name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 4.7.* +description: Prometheus is a monitoring system and time series database. +home: https://prometheus.io/ +icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png +maintainers: +- email: gianrubio@gmail.com + name: gianrubio +- email: zanhsieh@gmail.com + name: zanhsieh +- email: miroslav.hadzhiev@gmail.com + name: Xtigyro +- email: naseem@transit.app + name: naseemkullah +name: prometheus +sources: +- https://github.com/prometheus/alertmanager +- https://github.com/prometheus/prometheus +- https://github.com/prometheus/pushgateway +- https://github.com/prometheus/node_exporter +- https://github.com/kubernetes/kube-state-metrics +type: application +version: 15.8.5 diff --git a/deploy/dev/charts/prometheus/README.md b/deploy/dev/charts/prometheus/README.md new file mode 100644 index 0000000..d8a1e9a --- /dev/null +++ b/deploy/dev/charts/prometheus/README.md @@ -0,0 +1,226 @@ +# Prometheus + +[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true. + +This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
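+
+As a quick orientation only (a minimal sketch, not a chart default), a small custom values file such as the hypothetical `custom-values.yaml` below could be passed with `helm install -f custom-values.yaml` to enable persistence and set a retention window:
+
+```yaml
+# custom-values.yaml -- illustrative override; see values.yaml for the full set of options
+server:
+  retention: 15d
+  persistentVolume:
+    enabled: true
+    size: 8Gi
+alertmanager:
+  enabled: true
+```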
+ +## Prerequisites + +- Kubernetes 1.16+ +- Helm 3+ + +## Get Repo Info + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +```console +helm install [RELEASE_NAME] prometheus-community/prometheus +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Dependencies + +By default this chart installs additional, dependent charts: + +- [kube-state-metrics](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics) + +To disable the dependency during installation, set `kubeStateMetrics.enabled` to `false`. + +_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### To 15.0 + +Version 15.0.0 changes the relabeling config, aligning it with the [Prometheus community conventions](https://github.com/prometheus/prometheus/pull/9832). If you've made manual changes to the relabeling config, you have to adapt your changes. + +Before you update please execute the following command, to be able to update kube-state-metrics: + +```bash +kubectl delete deployments.apps -l app.kubernetes.io/instance=prometheus,app.kubernetes.io/name=kube-state-metrics --cascade=orphan +``` + +### To 9.0 + +Version 9.0 adds a new option to enable or disable the Prometheus Server. This supports the use case of running a Prometheus server in one k8s cluster and scraping exporters in another cluster while using the same chart for each deployment. To install the server `server.enabled` must be set to `true`. + +### To 5.0 + +As of version 5.0, this chart uses Prometheus 2.x. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work. See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data. + +Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out the migration guide [here](https://prometheus.io/docs/prometheus/2.0/migration/). + +Users of this chart will need to update their alerting rules to the new format before they can upgrade. + +### Example Migration + +Assuming you have an existing release of the prometheus chart, named `prometheus-old`. In order to update to prometheus 2.x while keeping your old data do the following: + +1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below: + + ```yaml + alertmanager: + enabled: false + alertmanagerFiles: + alertmanager.yml: "" + kubeStateMetrics: + enabled: false + nodeExporter: + enabled: false + pushgateway: + enabled: false + server: + extraArgs: + storage.local.retention: 720h + serverFiles: + alerts: "" + prometheus.yml: "" + rules: "" + ``` + +1. 
Deploy a new release of the chart with version 5.0+ using prometheus 2.x. In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target.
+
+   ```yaml
+   prometheus.yml:
+     ...
+     remote_read:
+     - url: http://prometheus-old/api/v1/read
+     ...
+   ```
+
+   Old data will be available when you query the new prometheus instance.
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
+
+```console
+helm show values prometheus-community/prometheus
+```
+
+You may similarly use the above configuration commands on each chart [dependency](#dependencies) to see its configurations.
+
+### Scraping Pod Metrics via Annotations
+
+This chart uses a default configuration that causes prometheus to scrape a variety of kubernetes resource types, provided they have the correct annotations. In this section we describe how to configure pods to be scraped; for information on how other resource types can be scraped you can do a `helm template` to get the kubernetes resource definitions, and then reference the prometheus configuration in the ConfigMap against the prometheus documentation for [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) and [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config).
+
+In order to get prometheus to scrape pods, you must add annotations to the pods as below:
+
+```yaml
+metadata:
+  annotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/path: /metrics
+    prometheus.io/port: "8080"
+```
+
+You should adjust `prometheus.io/path` based on the URL that your pod serves metrics from. `prometheus.io/port` should be set to the port that your pod serves metrics from. Note that the values for `prometheus.io/scrape` and `prometheus.io/port` must be enclosed in double quotes.
+
+### Sharing Alerts Between Services
+
+Note that when [installing](#install-chart) or [upgrading](#upgrading-chart) you may use multiple values override files. This is particularly useful when you have alerts belonging to multiple services in the cluster. For example,
+
+```yaml
+# values.yaml
+# ...
+
+# service1-alert.yaml
+serverFiles:
+  alerts:
+    service1:
+      - alert: anAlert
+      # ...
+
+# service2-alert.yaml
+serverFiles:
+  alerts:
+    service2:
+      - alert: anAlert
+      # ...
+```
+
+```console
+helm install [RELEASE_NAME] prometheus-community/prometheus -f values.yaml -f service1-alert.yaml -f service2-alert.yaml
+```
+
+### RBAC Configuration
+
+Role and RoleBinding resources will be created automatically for the `server` service.
+
+To manually set up RBAC, you need to set the parameter `rbac.create=false` and specify the service account to be used for each service by setting the parameters: `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account.
+
+> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own.
+
+### ConfigMap Files
+
+AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod.
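+
+As a minimal sketch (the routing and Slack receiver shown are purely illustrative, not chart defaults), an `alertmanagerFiles` override in a custom values file might look like this:
+
+```yaml
+alertmanagerFiles:
+  alertmanager.yml:
+    route:
+      receiver: default-receiver
+      group_wait: 10s
+    receivers:
+      - name: default-receiver
+        slack_configs:
+          - api_url: https://hooks.slack.com/services/REPLACE/ME  # placeholder webhook
+            channel: '#alerts'
+```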
+ +Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod. + +### Ingress TLS + +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace: + +```console +kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file: + +```yaml +server: + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: true + + ## Prometheus server Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - prometheus.domain.com + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: prometheus-server-tls + hosts: + - prometheus.domain.com +``` + +### NetworkPolicy + +Enabling Network Policy for Prometheus will secure connections to Alert Manager and Kube State Metrics by only accepting connections from Prometheus Server. All inbound connections to Prometheus Server are still allowed. + +To enable network policy for Prometheus, install a networking plugin that implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true. + +If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need to manually create a networkpolicy which allows it. diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/.helmignore b/deploy/dev/charts/prometheus/charts/kube-state-metrics/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/Chart.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/Chart.yaml
new file mode 100644
index 0000000..2ee1909
--- /dev/null
+++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v2
+appVersion: 2.4.1
+description: Install kube-state-metrics to generate and expose cluster-level metrics
+home: https://github.com/kubernetes/kube-state-metrics/
+keywords:
+- metric
+- monitoring
+- prometheus
+- kubernetes
+maintainers:
+- email: tariq.ibrahim@mulesoft.com
  name: tariq1890
+- email: manuel@rueg.eu
  name: mrueg
+- email: davidcalvertfr@gmail.com
  name: dotdc
+name: kube-state-metrics
+sources:
+- https://github.com/kubernetes/kube-state-metrics/
+type: application
+version: 4.7.0
diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/README.md b/deploy/dev/charts/prometheus/charts/kube-state-metrics/README.md
new file mode 100644
index 0000000..7c2e169
--- /dev/null
+++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/README.md
@@ -0,0 +1,68 @@
+# kube-state-metrics Helm Chart
+
+Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state-metrics).
+
+## Get Repo Info
+
+```console
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm repo update
+```
+
+_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
+
+## Install Chart
+
+```console
+helm install [RELEASE_NAME] prometheus-community/kube-state-metrics [flags]
+```
+
+_See [configuration](#configuration) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+## Uninstall Chart
+
+```console
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+## Upgrading Chart
+
+```console
+helm upgrade [RELEASE_NAME] prometheus-community/kube-state-metrics [flags]
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+### Migrating from stable/kube-state-metrics and kubernetes/kube-state-metrics
+
+You can upgrade in-place:
+
+1. [get repo info](#get-repo-info)
+1. [upgrade](#upgrading-chart) your existing release name using the new chart repo
+
+
+## Upgrading to v3.0.0
+
+v3.0.0 includes kube-state-metrics v2.0; see the [changelog](https://github.com/kubernetes/kube-state-metrics/blob/release-2.0/CHANGELOG.md) for major changes on the application side.
+
+The upgraded chart now includes the following changes:
+* Dropped support for helm v2 (helm v3 or later is required)
+* `collectors` key was renamed to `resources`
+* `namespace` key was renamed to `namespaces`
+
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments:
+
+```console
+helm show values prometheus-community/kube-state-metrics
+```
+
+You may also run `helm show values` on this chart's [dependencies](#dependencies) for additional options.
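+
+As a sketch only (the trimmed collector list and the label key `team` are illustrative, not defaults), a custom values file for this chart could restrict the scraped resources and allowlist a pod label like so:
+
+```yaml
+# ksm-values.yaml -- hypothetical override, applied with `helm install ... -f ksm-values.yaml`
+collectors:
+  - deployments
+  - pods
+  - nodes
+metricLabelsAllowlist:
+  - pods=[team]
+```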
diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/NOTES.txt b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/NOTES.txt new file mode 100644 index 0000000..5a646e0 --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/NOTES.txt @@ -0,0 +1,10 @@ +kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects. +The exposed metrics can be found here: +https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics + +The metrics are exported on the HTTP endpoint /metrics on the listening port. +In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics + +They are served either as plaintext or protobuf depending on the Accept header. +They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint. + diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl new file mode 100644 index 0000000..976b273 --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl @@ -0,0 +1,82 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "kube-state-metrics.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kube-state-metrics.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kube-state-metrics.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "kube-state-metrics.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "kube-state-metrics.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kube-state-metrics.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate basic labels +*/}} +{{- define "kube-state-metrics.labels" }} +helm.sh/chart: {{ template "kube-state-metrics.chart" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/component: metrics +app.kubernetes.io/part-of: {{ template "kube-state-metrics.name" . }} +{{- include "kube-state-metrics.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels }} +{{- end }} +{{- if .Values.releaseLabel }} +release: {{ .Release.Name }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kube-state-metrics.selectorLabels" }} +app.kubernetes.io/name: {{ include "kube-state-metrics.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..cf9f628 --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.rbac.create .Values.rbac.useClusterRole -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: {{ template "kube-state-metrics.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} +{{- else }} + name: {{ template "kube-state-metrics.fullname" . }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml new file mode 100644 index 0000000..60f5d59 --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml @@ -0,0 +1,151 @@ +apiVersion: apps/v1 +{{- if .Values.autosharding.enabled }} +kind: StatefulSet +{{- else }} +kind: Deployment +{{- end }} +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +spec: + selector: + matchLabels: + {{- include "kube-state-metrics.selectorLabels" . | indent 6 }} + replicas: {{ .Values.replicas }} + {{- if .Values.autosharding.enabled }} + serviceName: {{ template "kube-state-metrics.fullname" . }} + volumeClaimTemplates: [] + {{- end }} + template: + metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 8 }} + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + hostNetwork: {{ .Values.hostNetwork }} + serviceAccountName: {{ template "kube-state-metrics.serviceAccountName" . }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + {{- if .Values.autosharding.enabled }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- end }} + args: + {{- if .Values.extraArgs }} + {{- range .Values.extraArgs }} + - {{ . 
}} + {{- end }} + {{- end }} + {{- if .Values.service.port }} + - --port={{ .Values.service.port | default 8080}} + {{- end }} + {{- if .Values.collectors }} + - --resources={{ .Values.collectors | join "," }} + {{- end }} + {{- if .Values.metricLabelsAllowlist }} + - --metric-labels-allowlist={{ .Values.metricLabelsAllowlist | join "," }} + {{- end }} + {{- if .Values.metricAnnotationsAllowList }} + - --metric-annotations-allowlist={{ .Values.metricAnnotationsAllowList | join "," }} + {{- end }} + {{- if .Values.metricAllowlist }} + - --metric-allowlist={{ .Values.metricAllowlist | join "," }} + {{- end }} + {{- if .Values.metricDenylist }} + - --metric-denylist={{ .Values.metricDenylist | join "," }} + {{- end }} + {{- if .Values.namespaces }} + - --namespaces={{ tpl (.Values.namespaces | join ",") $ }} + {{- end }} + {{- if .Values.namespacesDenylist }} + - --namespaces-denylist={{ tpl (.Values.namespacesDenylist | join ",") $ }} + {{- end }} + {{- if .Values.autosharding.enabled }} + - --pod=$(POD_NAME) + - --pod-namespace=$(POD_NAMESPACE) + {{- end }} + {{- if .Values.kubeconfig.enabled }} + - --kubeconfig=/opt/k8s/.kube/config + {{- end }} + {{- if .Values.selfMonitor.telemetryHost }} + - --telemetry-host={{ .Values.selfMonitor.telemetryHost }} + {{- end }} + - --telemetry-port={{ .Values.selfMonitor.telemetryPort | default 8081 }} + {{- if .Values.kubeconfig.enabled }} + volumeMounts: + - name: kubeconfig + mountPath: /opt/k8s/.kube/ + readOnly: true + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + ports: + - containerPort: {{ .Values.service.port | default 8080}} + name: "http" + {{- if .Values.selfMonitor.enabled }} + - containerPort: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + name: "metrics" + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.service.port | default 8080}} + initialDelaySeconds: 5 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: / + port: {{ .Values.service.port | default 8080}} + initialDelaySeconds: 5 + timeoutSeconds: 5 + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 10 }} +{{- end }} +{{- if .Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 10 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.kubeconfig.enabled}} + volumes: + - name: kubeconfig + secret: + secretName: {{ template "kube-state-metrics.fullname" . }}-kubeconfig + {{- end }} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml new file mode 100644 index 0000000..6af0084 --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml @@ -0,0 +1,12 @@ +{{- if .Values.kubeconfig.enabled -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kube-state-metrics.fullname" . }}-kubeconfig + namespace: {{ template "kube-state-metrics.namespace" . 
}} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +type: Opaque +data: + config: '{{ .Values.kubeconfig.secret }}' +{{- end -}} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/pdb.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/pdb.yaml new file mode 100644 index 0000000..cbcf3a3 --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..3299056 --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml @@ -0,0 +1,39 @@ +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +{{- if .Values.podSecurityPolicy.annotations }} + annotations: +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + volumes: + - 'secret' +{{- if .Values.podSecurityPolicy.additionalVolumes }} +{{ toYaml .Values.podSecurityPolicy.additionalVolumes | indent 4 }} +{{- end }} + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml new file mode 100644 index 0000000..69047d4 --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: psp-{{ template "kube-state-metrics.fullname" . }} +rules: +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} +- apiGroups: ['policy'] +{{- else }} +- apiGroups: ['extensions'] +{{- end }} + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "kube-state-metrics.fullname" . 
}} +{{- end }} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml new file mode 100644 index 0000000..03c56d5 --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: psp-{{ template "kube-state-metrics.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/role.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/role.yaml new file mode 100644 index 0000000..e514e3c --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/role.yaml @@ -0,0 +1,187 @@ +{{- if and (eq .Values.rbac.create true) (not .Values.rbac.useExistingRole) -}} +{{- range (ternary (split "," .Values.namespaces) (list "") (eq $.Values.rbac.useClusterRole false)) }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +{{- if eq $.Values.rbac.useClusterRole false }} +kind: Role +{{- else }} +kind: ClusterRole +{{- end }} +metadata: + labels: + {{- include "kube-state-metrics.labels" $ | indent 4 }} + name: {{ template "kube-state-metrics.fullname" $ }} +{{- if eq $.Values.rbac.useClusterRole false }} + namespace: {{ . 
}} +{{- end }} +rules: +{{ if has "certificatesigningrequests" $.Values.collectors }} +- apiGroups: ["certificates.k8s.io"] + resources: + - certificatesigningrequests + verbs: ["list", "watch"] +{{ end -}} +{{ if has "configmaps" $.Values.collectors }} +- apiGroups: [""] + resources: + - configmaps + verbs: ["list", "watch"] +{{ end -}} +{{ if has "cronjobs" $.Values.collectors }} +- apiGroups: ["batch"] + resources: + - cronjobs + verbs: ["list", "watch"] +{{ end -}} +{{ if has "daemonsets" $.Values.collectors }} +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "deployments" $.Values.collectors }} +- apiGroups: ["extensions", "apps"] + resources: + - deployments + verbs: ["list", "watch"] +{{ end -}} +{{ if has "endpoints" $.Values.collectors }} +- apiGroups: [""] + resources: + - endpoints + verbs: ["list", "watch"] +{{ end -}} +{{ if has "horizontalpodautoscalers" $.Values.collectors }} +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: ["list", "watch"] +{{ end -}} +{{ if has "ingresses" $.Values.collectors }} +- apiGroups: ["extensions", "networking.k8s.io"] + resources: + - ingresses + verbs: ["list", "watch"] +{{ end -}} +{{ if has "jobs" $.Values.collectors }} +- apiGroups: ["batch"] + resources: + - jobs + verbs: ["list", "watch"] +{{ end -}} +{{ if has "limitranges" $.Values.collectors }} +- apiGroups: [""] + resources: + - limitranges + verbs: ["list", "watch"] +{{ end -}} +{{ if has "mutatingwebhookconfigurations" $.Values.collectors }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - mutatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if has "namespaces" $.Values.collectors }} +- apiGroups: [""] + resources: + - namespaces + verbs: ["list", "watch"] +{{ end -}} +{{ if has "networkpolicies" $.Values.collectors }} +- apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: ["list", "watch"] +{{ end -}} +{{ if has "nodes" $.Values.collectors }} +- apiGroups: [""] + resources: + - nodes + verbs: ["list", "watch"] +{{ end -}} +{{ if has "persistentvolumeclaims" $.Values.collectors }} +- apiGroups: [""] + resources: + - persistentvolumeclaims + verbs: ["list", "watch"] +{{ end -}} +{{ if has "persistentvolumes" $.Values.collectors }} +- apiGroups: [""] + resources: + - persistentvolumes + verbs: ["list", "watch"] +{{ end -}} +{{ if has "poddisruptionbudgets" $.Values.collectors }} +- apiGroups: ["policy"] + resources: + - poddisruptionbudgets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "pods" $.Values.collectors }} +- apiGroups: [""] + resources: + - pods + verbs: ["list", "watch"] +{{ end -}} +{{ if has "replicasets" $.Values.collectors }} +- apiGroups: ["extensions", "apps"] + resources: + - replicasets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "replicationcontrollers" $.Values.collectors }} +- apiGroups: [""] + resources: + - replicationcontrollers + verbs: ["list", "watch"] +{{ end -}} +{{ if has "resourcequotas" $.Values.collectors }} +- apiGroups: [""] + resources: + - resourcequotas + verbs: ["list", "watch"] +{{ end -}} +{{ if has "secrets" $.Values.collectors }} +- apiGroups: [""] + resources: + - secrets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "services" $.Values.collectors }} +- apiGroups: [""] + resources: + - services + verbs: ["list", "watch"] +{{ end -}} +{{ if has "statefulsets" $.Values.collectors }} +- apiGroups: ["apps"] + resources: + - statefulsets + verbs: ["list", "watch"] +{{ end -}} +{{ 
if has "storageclasses" $.Values.collectors }} +- apiGroups: ["storage.k8s.io"] + resources: + - storageclasses + verbs: ["list", "watch"] +{{ end -}} +{{ if has "validatingwebhookconfigurations" $.Values.collectors }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - validatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if has "volumeattachments" $.Values.collectors }} +- apiGroups: ["storage.k8s.io"] + resources: + - volumeattachments + verbs: ["list", "watch"] +{{ end -}} +{{ if has "verticalpodautoscalers" $.Values.collectors }} +- apiGroups: ["autoscaling.k8s.io"] + resources: + - verticalpodautoscalers + verbs: ["list", "watch"] +{{ end -}} +{{- end -}} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml new file mode 100644 index 0000000..135094f --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}} +{{- range (split "," $.Values.namespaces) }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "kube-state-metrics.labels" $ | indent 4 }} + name: {{ template "kube-state-metrics.fullname" $ }} + namespace: {{ . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role +{{- if (not $.Values.rbac.useExistingRole) }} + name: {{ template "kube-state-metrics.fullname" $ }} +{{- else }} + name: {{ $.Values.rbac.useExistingRole }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" $ }} + namespace: {{ template "kube-state-metrics.namespace" $ }} +{{- end -}} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/service.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/service.yaml new file mode 100644 index 0000000..5a2d8ea --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/service.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + annotations: + {{- if .Values.prometheusScrape }} + prometheus.io/scrape: '{{ .Values.prometheusScrape }}' + {{- end }} + {{- if .Values.service.annotations }} + {{- toYaml .Values.service.annotations | nindent 4 }} + {{- end }} +spec: + type: "{{ .Values.service.type }}" + ports: + - name: "http" + protocol: TCP + port: {{ .Values.service.port | default 8080}} + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + targetPort: {{ .Values.service.port | default 8080}} + {{ if .Values.selfMonitor.enabled }} + - name: "metrics" + protocol: TCP + port: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + targetPort: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + {{ end }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.service.clusterIP }} + clusterIP: "{{ .Values.service.clusterIP }}" +{{- end }} + selector: + {{- include "kube-state-metrics.selectorLabels" . 
| indent 4 }} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml new file mode 100644 index 0000000..e1229eb --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: {{ template "kube-state-metrics.serviceAccountName" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} +{{- end }} +imagePullSecrets: +{{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml new file mode 100644 index 0000000..93a5870 --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml @@ -0,0 +1,66 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + {{- with .Values.prometheus.monitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ default "app.kubernetes.io/name" .Values.prometheus.monitor.jobLabel }} + selector: + matchLabels: + {{- if .Values.prometheus.monitor.selectorOverride -}} + {{ toYaml .Values.prometheus.monitor.selectorOverride | nindent 6 }} + {{ else }} + {{- include "kube-state-metrics.selectorLabels" . 
| indent 6 }} + {{- end }} + endpoints: + - port: http + {{- if .Values.prometheus.monitor.interval }} + interval: {{ .Values.prometheus.monitor.interval }} + {{- end }} + {{- if .Values.prometheus.monitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.prometheus.monitor.scrapeTimeout }} + {{- end }} + {{- if .Values.prometheus.monitor.proxyUrl }} + proxyUrl: {{ .Values.prometheus.monitor.proxyUrl}} + {{- end }} + {{- if .Values.prometheus.monitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.prometheus.monitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.prometheus.monitor.metricRelabelings | nindent 8 }} + {{- end }} + {{- if .Values.prometheus.monitor.relabelings }} + relabelings: + {{- toYaml .Values.prometheus.monitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.selfMonitor.enabled }} + - port: metrics + {{- if .Values.prometheus.monitor.interval }} + interval: {{ .Values.prometheus.monitor.interval }} + {{- end }} + {{- if .Values.prometheus.monitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.prometheus.monitor.scrapeTimeout }} + {{- end }} + {{- if .Values.prometheus.monitor.proxyUrl }} + proxyUrl: {{ .Values.prometheus.monitor.proxyUrl}} + {{- end }} + {{- if .Values.prometheus.monitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.prometheus.monitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.prometheus.monitor.metricRelabelings | nindent 8 }} + {{- end }} + {{- if .Values.prometheus.monitor.relabelings }} + relabelings: + {{- toYaml .Values.prometheus.monitor.relabelings | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml new file mode 100644 index 0000000..489de14 --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - apps + resourceNames: + - {{ template "kube-state-metrics.fullname" . }} + resources: + - statefulsets + verbs: + - get + - list + - watch +{{- end }} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml new file mode 100644 index 0000000..73b37a4 --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" . 
}} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/charts/kube-state-metrics/values.yaml b/deploy/dev/charts/prometheus/charts/kube-state-metrics/values.yaml new file mode 100644 index 0000000..89e0da7 --- /dev/null +++ b/deploy/dev/charts/prometheus/charts/kube-state-metrics/values.yaml @@ -0,0 +1,229 @@ +# Default values for kube-state-metrics. +prometheusScrape: true +image: + repository: k8s.gcr.io/kube-state-metrics/kube-state-metrics + tag: v2.4.1 + pullPolicy: IfNotPresent + +imagePullSecrets: [] +# - name: "image-pull-secret" + +# If set to true, this will deploy kube-state-metrics as a StatefulSet and the data +# will be automatically sharded across <.Values.replicas> pods using the built-in +# autodiscovery feature: https://github.com/kubernetes/kube-state-metrics#automated-sharding +# This is an experimental feature and there are no stability guarantees. +autosharding: + enabled: false + +replicas: 1 + +# List of additional cli arguments to configure kube-state-metrics +# for example: --enable-gzip-encoding, --log-file, etc. +# all the possible args can be found here: https://github.com/kubernetes/kube-state-metrics/blob/master/docs/cli-arguments.md +extraArgs: [] + +service: + port: 8080 + # Default to clusterIP for backward compatibility + type: ClusterIP + nodePort: 0 + loadBalancerIP: "" + clusterIP: "" + annotations: {} + +## Additional labels to add to all resources +customLabels: {} + # app: kube-state-metrics + +## set to true to add the release label so scraping of the servicemonitor with kube-prometheus-stack works out of the box +releaseLabel: false + +hostNetwork: false + +rbac: + # If true, create & use RBAC resources + create: true + + # Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to it, rolename set here. + # useExistingRole: your-existing-role + + # If set to false - Run without Cluteradmin privs needed - ONLY works if namespace is also set (if useExistingRole is set this name is used as ClusterRole or Role to bind to) + useClusterRole: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created, require rbac true + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + # Reference to one or more secrets to be used when pulling images + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + imagePullSecrets: [] + # ServiceAccount annotations. 
+ # Use case: AWS EKS IAM roles for service accounts + # ref: https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html + annotations: {} + +prometheus: + monitor: + enabled: false + additionalLabels: {} + namespace: "" + jobLabel: "" + interval: "" + scrapeTimeout: "" + proxyUrl: "" + selectorOverride: {} + honorLabels: false + metricRelabelings: [] + relabelings: [] + +## Specify if a Pod Security Policy for kube-state-metrics must be created +## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + enabled: false + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + additionalVolumes: [] + +securityContext: + enabled: true + runAsGroup: 65534 + runAsUser: 65534 + fsGroup: 65534 + +## Specify security settings for a Container +## Allows overrides and additional options compared to (Pod) securityContext +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +containerSecurityContext: {} + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +## Affinity settings for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +affinity: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +# Annotations to be added to the pod +podAnnotations: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: "" + +# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} + +# Comma-separated list of metrics to be exposed. +# This list comprises of exact metric names and/or regex patterns. +# The allowlist and denylist are mutually exclusive. +metricAllowlist: [] + +# Comma-separated list of metrics not to be enabled. +# This list comprises of exact metric names and/or regex patterns. +# The allowlist and denylist are mutually exclusive. +metricDenylist: [] + +# Comma-separated list of additional Kubernetes label keys that will be used in the resource's +# labels metric. By default the metric contains only name and namespace labels. +# To include additional labels, provide a list of resource names in their plural form and Kubernetes +# label keys you would like to allow for them (Example: '=namespaces=[k8s-label-1,k8s-label-n,...],pods=[app],...)'. +# A single '*' can be provided per resource instead to allow any labels, but that has +# severe performance implications (Example: '=pods=[*]'). +metricLabelsAllowlist: [] + # - namespaces=[k8s-label-1,k8s-label-n] + +# Comma-separated list of Kubernetes annotations keys that will be used in the resource' +# labels metric. By default the metric contains only name and namespace labels. +# To include additional annotations provide a list of resource names in their plural form and Kubernetes +# annotation keys you would like to allow for them (Example: '=namespaces=[kubernetes.io/team,...],pods=[kubernetes.io/team],...)'. 
+# A single '*' can be provided per resource instead to allow any annotations, but that has +# severe performance implications (Example: '=pods=[*]'). +metricAnnotationsAllowList: [] + # - pods=[k8s-annotation-1,k8s-annotation-n] + +# Available collectors for kube-state-metrics. +# By default, all available resources are enabled, comment out to disable. +collectors: + - certificatesigningrequests + - configmaps + - cronjobs + - daemonsets + - deployments + - endpoints + - horizontalpodautoscalers + - ingresses + - jobs + - limitranges + - mutatingwebhookconfigurations + - namespaces + - networkpolicies + - nodes + - persistentvolumeclaims + - persistentvolumes + - poddisruptionbudgets + - pods + - replicasets + - replicationcontrollers + - resourcequotas + - secrets + - services + - statefulsets + - storageclasses + - validatingwebhookconfigurations + - volumeattachments + # - verticalpodautoscalers # not a default resource, see also: https://github.com/kubernetes/kube-state-metrics#enabling-verticalpodautoscalers + +# Enabling kubeconfig will pass the --kubeconfig argument to the container +kubeconfig: + enabled: false + # base64 encoded kube-config file + secret: + +# Comma-separated list of namespaces to be enabled for collecting resources. By default all namespaces are collected. +namespaces: "" + +# Comma-separated list of namespaces not to be enabled. If namespaces and namespaces-denylist are both set, +# only namespaces that are excluded in namespaces-denylist will be used. +namespacesDenylist: "" + +## Override the deployment namespace +## +namespaceOverride: "" + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + +## Provide a k8s version to define apiGroups for podSecurityPolicy Cluster Role. +## For example: kubeTargetVersionOverride: 1.14.9 +## +kubeTargetVersionOverride: "" + +# Enable self metrics configuration for service and Service Monitor +# Default values for telemetry configuration can be overridden +selfMonitor: + enabled: false + # telemetryHost: 0.0.0.0 + # telemetryPort: 8081 diff --git a/deploy/dev/charts/prometheus/templates/NOTES.txt b/deploy/dev/charts/prometheus/templates/NOTES.txt new file mode 100644 index 0000000..0e8868f --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/NOTES.txt @@ -0,0 +1,112 @@ +{{- if .Values.server.enabled -}} +The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.server.ingress.enabled -}} +From outside the cluster, the server URL(s) are: +{{- range .Values.server.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the Prometheus server URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.server.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.server.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }} +{{- else if contains "ClusterIP" .Values.server.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.server.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9090 +{{- end }} +{{- end }} + +{{- if .Values.server.persistentVolume.enabled }} +{{- else }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Server pod is terminated. ##### +################################################################################# +{{- end }} +{{- end }} + +{{ if .Values.alertmanager.enabled }} +The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.alertmanager.ingress.enabled -}} +From outside the cluster, the alertmanager URL(s) are: +{{- range .Values.alertmanager.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the Alertmanager URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.alertmanager.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }} +{{- else if contains "ClusterIP" .Values.alertmanager.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.alertmanager.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093 +{{- end }} +{{- end }} + +{{- if .Values.alertmanager.persistentVolume.enabled }} +{{- else }} +################################################################################# +###### WARNING: Persistence is disabled!!! 
You will lose your data when ##### +###### the AlertManager pod is terminated. ##### +################################################################################# +{{- end }} +{{- end }} + +{{- if .Values.nodeExporter.podSecurityPolicy.enabled }} +{{- else }} +################################################################################# +###### WARNING: Pod Security Policy has been moved to a global property. ##### +###### use .Values.podSecurityPolicy.enabled with pod-based ##### +###### annotations ##### +###### (e.g. .Values.nodeExporter.podSecurityPolicy.annotations) ##### +################################################################################# +{{- end }} + +{{ if .Values.pushgateway.enabled }} +The Prometheus PushGateway can be accessed via port {{ .Values.pushgateway.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.pushgateway.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.pushgateway.ingress.enabled -}} +From outside the cluster, the pushgateway URL(s) are: +{{- range .Values.pushgateway.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the PushGateway URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.pushgateway.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.pushgateway.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.pushgateway.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.pushgateway.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.pushgateway.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.pushgateway.service.servicePort }} +{{- else if contains "ClusterIP" .Values.pushgateway.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.pushgateway.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091 +{{- end }} +{{- end }} +{{- end }} + +For more information on running Prometheus, visit: +https://prometheus.io/ diff --git a/deploy/dev/charts/prometheus/templates/_helpers.tpl b/deploy/dev/charts/prometheus/templates/_helpers.tpl new file mode 100644 index 0000000..065065c --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/_helpers.tpl @@ -0,0 +1,282 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "prometheus.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "prometheus.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create unified labels for prometheus components +*/}} +{{- define "prometheus.common.matchLabels" -}} +app: {{ template "prometheus.name" . 
}} +release: {{ .Release.Name }} +{{- end -}} + +{{- define "prometheus.common.metaLabels" -}} +chart: {{ template "prometheus.chart" . }} +heritage: {{ .Release.Service }} +{{- end -}} + +{{- define "prometheus.alertmanager.labels" -}} +{{ include "prometheus.alertmanager.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.alertmanager.matchLabels" -}} +component: {{ .Values.alertmanager.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.nodeExporter.labels" -}} +{{ include "prometheus.nodeExporter.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.nodeExporter.matchLabels" -}} +component: {{ .Values.nodeExporter.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.pushgateway.labels" -}} +{{ include "prometheus.pushgateway.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.pushgateway.matchLabels" -}} +component: {{ .Values.pushgateway.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.server.labels" -}} +{{ include "prometheus.server.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.server.matchLabels" -}} +component: {{ .Values.server.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified alertmanager name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} + +{{- define "prometheus.alertmanager.fullname" -}} +{{- if .Values.alertmanager.fullnameOverride -}} +{{- .Values.alertmanager.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified node-exporter name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "prometheus.nodeExporter.fullname" -}} +{{- if .Values.nodeExporter.fullnameOverride -}} +{{- .Values.nodeExporter.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified Prometheus server name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.server.fullname" -}} +{{- if .Values.server.fullnameOverride -}} +{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified pushgateway name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.pushgateway.fullname" -}} +{{- if .Values.pushgateway.fullnameOverride -}} +{{- .Values.pushgateway.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Get KubeVersion removing pre-release information. +*/}} +{{- define "prometheus.kubeVersion" -}} + {{- default .Capabilities.KubeVersion.Version (regexFind "v[0-9]+\\.[0-9]+\\.[0-9]+" .Capabilities.KubeVersion.Version) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "prometheus.deployment.apiVersion" -}} +{{- print "apps/v1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "prometheus.daemonset.apiVersion" -}} +{{- print "apps/v1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "prometheus.networkPolicy.apiVersion" -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for podsecuritypolicy. +*/}} +{{- define "prometheus.podSecurityPolicy.apiVersion" -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "rbac.apiVersion" -}} +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "ingress.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19.x" (include "prometheus.kubeVersion" .)) -}} + {{- print "networking.k8s.io/v1" -}} + {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} + {{- print "networking.k8s.io/v1beta1" -}} + {{- else -}} + {{- print "extensions/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* +Return if ingress is stable. +*/}} +{{- define "ingress.isStable" -}} + {{- eq (include "ingress.apiVersion" .) "networking.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "ingress.supportsIngressClassName" -}} + {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} +{{- end -}} +{{/* +Return if ingress supports pathType. +*/}} +{{- define "ingress.supportsPathType" -}} + {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the alertmanager component +*/}} +{{- define "prometheus.serviceAccountName.alertmanager" -}} +{{- if .Values.serviceAccounts.alertmanager.create -}} + {{ default (include "prometheus.alertmanager.fullname" .) .Values.serviceAccounts.alertmanager.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.alertmanager.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the nodeExporter component +*/}} +{{- define "prometheus.serviceAccountName.nodeExporter" -}} +{{- if .Values.serviceAccounts.nodeExporter.create -}} + {{ default (include "prometheus.nodeExporter.fullname" .) .Values.serviceAccounts.nodeExporter.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.nodeExporter.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the pushgateway component +*/}} +{{- define "prometheus.serviceAccountName.pushgateway" -}} +{{- if .Values.serviceAccounts.pushgateway.create -}} + {{ default (include "prometheus.pushgateway.fullname" .) .Values.serviceAccounts.pushgateway.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.pushgateway.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the server component +*/}} +{{- define "prometheus.serviceAccountName.server" -}} +{{- if .Values.serviceAccounts.server.create -}} + {{ default (include "prometheus.server.fullname" .) 
.Values.serviceAccounts.server.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.server.name }} +{{- end -}} +{{- end -}} + +{{/* +Define the prometheus.namespace template if set with forceNamespace or .Release.Namespace is set +*/}} +{{- define "prometheus.namespace" -}} +{{- if .Values.forceNamespace -}} +{{ printf "namespace: %s" .Values.forceNamespace }} +{{- else -}} +{{ printf "namespace: %s" .Release.Namespace }} +{{- end -}} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/clusterrole.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/clusterrole.yaml new file mode 100644 index 0000000..c732ff4 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/clusterrole.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.alertmanager.useClusterRole (not .Values.alertmanager.useExistingRole) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.alertmanager.fullname" . }} +{{- else }} + [] +{{- end }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml new file mode 100644 index 0000000..6f13e98 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.alertmanager.useClusterRole -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{ include "prometheus.namespace" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- if (not .Values.alertmanager.useExistingRole) }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{- else }} + name: {{ .Values.alertmanager.useExistingRole }} +{{- end }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/cm.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/cm.yaml new file mode 100644 index 0000000..cb09bf0 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/cm.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.alertmanager.enabled (and (empty .Values.alertmanager.configMapOverrideName) (empty .Values.alertmanager.configFromSecret)) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +data: +{{- $root := . 
-}} +{{- range $key, $value := .Values.alertmanagerFiles }} + {{- if $key | regexMatch ".*\\.ya?ml$" }} + {{ $key }}: | +{{ toYaml $value | default "{}" | indent 4 }} + {{- else }} + {{ $key }}: {{ toYaml $value | indent 4 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/deploy.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/deploy.yaml new file mode 100644 index 0000000..8633569 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/deploy.yaml @@ -0,0 +1,208 @@ +{{- if and .Values.alertmanager.enabled (not .Values.alertmanager.statefulSet.enabled) -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.alertmanager.deploymentAnnotations }} + annotations: + {{ toYaml .Values.alertmanager.deploymentAnnotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + replicas: {{ .Values.alertmanager.replicaCount }} + {{- if .Values.alertmanager.strategy }} + strategy: +{{ toYaml .Values.alertmanager.strategy | trim | indent 4 }} + {{ if eq .Values.alertmanager.strategy.type "Recreate" }}rollingUpdate: null{{ end }} +{{- end }} + template: + metadata: + {{- if .Values.alertmanager.podAnnotations }} + annotations: + {{ toYaml .Values.alertmanager.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 8 }} + {{- if .Values.alertmanager.podLabels}} + {{ toYaml .Values.alertmanager.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.alertmanager.schedulerName }} + schedulerName: "{{ .Values.alertmanager.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} + {{- if .Values.alertmanager.extraInitContainers }} + initContainers: +{{ toYaml .Values.alertmanager.extraInitContainers | indent 8 }} + {{- end }} +{{- if .Values.alertmanager.priorityClassName }} + priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }} + image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" + env: + {{- range $key, $value := .Values.alertmanager.extraEnv }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + args: + - --config.file=/etc/config/{{ .Values.alertmanager.configFileName }} + - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} + {{- if .Values.alertmanager.service.enableMeshPeer }} + - --cluster.listen-address=0.0.0.0:6783 + - --cluster.advertise-address=[$(POD_IP)]:6783 + {{- else }} + - --cluster.listen-address= + {{- end }} + {{- range $key, $value := .Values.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.alertmanager.baseURL }} + - --web.external-url={{ .Values.alertmanager.baseURL }} + {{- end }} + {{- range .Values.alertmanager.clusterPeers }} + - --cluster.peer={{ . 
}} + {{- end }} + + ports: + - containerPort: 9093 + readinessProbe: + httpGet: + path: {{ .Values.alertmanager.prefixURL }}/-/ready + port: 9093 + {{- if .Values.alertmanager.probeHeaders }} + httpHeaders: + {{- range .Values.alertmanager.probeHeaders }} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: +{{ toYaml .Values.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" + subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.alertmanager.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + + {{- if .Values.configmapReload.alertmanager.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.alertmanager.name }} + image: "{{ .Values.configmapReload.alertmanager.image.repository }}:{{ .Values.configmapReload.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.alertmanager.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9093{{ .Values.alertmanager.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.alertmanager.extraVolumeDirs }} + - --volume-dir={{ . }} + {{- end }} + {{- if .Values.configmapReload.alertmanager.containerPort }} + ports: + - containerPort: {{ .Values.configmapReload.alertmanager.containerPort }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.alertmanager.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.alertmanager.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} + {{- end }} + {{- with .Values.alertmanager.dnsConfig }} + dnsConfig: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.securityContext | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.tolerations }} + tolerations: +{{ toYaml .Values.alertmanager.tolerations | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.affinity }} + affinity: +{{ toYaml .Values.alertmanager.affinity | indent 8 }} + {{- end }} + volumes: + - name: config-volume + {{- if empty .Values.alertmanager.configFromSecret }} + configMap: + name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . 
}}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.alertmanager.configFromSecret }} + {{- end }} + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- range .Values.alertmanager.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.configmapReload.alertmanager.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.alertmanager.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + - name: storage-volume + {{- if .Values.alertmanager.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.alertmanager.persistentVolume.existingClaim }}{{ .Values.alertmanager.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} + {{- else }} + emptyDir: + {{- if .Values.alertmanager.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.alertmanager.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} + {{- end -}} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/headless-svc.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/headless-svc.yaml new file mode 100644 index 0000000..8c402c4 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/headless-svc.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.alertmanager.statefulSet.headless.annotations }} + annotations: +{{ toYaml .Values.alertmanager.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- if .Values.alertmanager.statefulSet.headless.labels }} +{{ toYaml .Values.alertmanager.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }}-headless +{{ include "prometheus.namespace" . | indent 2 }} +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.alertmanager.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: 9093 +{{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - name: meshpeer + port: 6783 + protocol: TCP + targetPort: 6783 +{{- end }} + selector: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/ingress.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/ingress.yaml new file mode 100644 index 0000000..2a7b67c --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/ingress.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.alertmanager.fullname" . }} +{{- $servicePort := .Values.alertmanager.service.servicePort -}} +{{- $ingressPath := .Values.alertmanager.ingress.path -}} +{{- $ingressPathType := .Values.alertmanager.ingress.pathType -}} +{{- $extraPaths := .Values.alertmanager.ingress.extraPaths -}} +apiVersion: {{ template "ingress.apiVersion" . 
}} +kind: Ingress +metadata: +{{- if .Values.alertmanager.ingress.annotations }} + annotations: +{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- range $key, $value := .Values.alertmanager.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.alertmanager.ingress.ingressClassName }} + ingressClassName: {{ .Values.alertmanager.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.alertmanager.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} +{{- if .Values.alertmanager.ingress.tls }} + tls: +{{ toYaml .Values.alertmanager.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/netpol.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/netpol.yaml new file mode 100644 index 0000000..e44ade6 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/netpol.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.alertmanager.enabled .Values.networkPolicy.enabled -}} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + ingress: + - from: + - podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 12 }} + - ports: + - port: 9093 +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/pdb.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/pdb.yaml new file mode 100644 index 0000000..41a92f3 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.alertmanager.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.alertmanager.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.alertmanager.labels" . | nindent 6 }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/psp.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/psp.yaml new file mode 100644 index 0000000..64fb130 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/psp.yaml @@ -0,0 +1,46 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . 
}} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + annotations: +{{- if .Values.alertmanager.podSecurityPolicy.annotations }} +{{ toYaml .Values.alertmanager.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'secret' + allowedHostPaths: + - pathPrefix: /etc + readOnly: true + - pathPrefix: {{ .Values.alertmanager.persistentVolume.mountPath }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: true +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/pvc.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/pvc.yaml new file mode 100644 index 0000000..160e296 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/pvc.yaml @@ -0,0 +1,37 @@ +{{- if not .Values.alertmanager.statefulSet.enabled -}} +{{- if and .Values.alertmanager.enabled .Values.alertmanager.persistentVolume.enabled -}} +{{- if not .Values.alertmanager.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.alertmanager.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + accessModes: +{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 4 }} +{{- if .Values.alertmanager.persistentVolume.storageClass }} +{{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.alertmanager.persistentVolume.volumeBindingMode }} + volumeBindingMode: "{{ .Values.alertmanager.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.alertmanager.persistentVolume.size }}" +{{- if .Values.alertmanager.persistentVolume.selector }} + selector: + {{- toYaml .Values.alertmanager.persistentVolume.selector | nindent 4 }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/role.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/role.yaml new file mode 100644 index 0000000..ce60eaf --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/role.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create (eq .Values.alertmanager.useClusterRole false) (not .Values.alertmanager.useExistingRole) -}} +{{- range $.Values.alertmanager.namespaces }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: Role +metadata: + labels: + {{- include "prometheus.alertmanager.labels" $ | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" $ }} + namespace: {{ . 
}} +rules: +{{- if $.Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.alertmanager.fullname" $ }} +{{- else }} + [] +{{- end }} +{{- end }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/rolebinding.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/rolebinding.yaml new file mode 100644 index 0000000..906d652 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create (eq .Values.alertmanager.useClusterRole false) -}} +{{ range $.Values.alertmanager.namespaces }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: RoleBinding +metadata: + labels: + {{- include "prometheus.alertmanager.labels" $ | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" $ }} + namespace: {{ . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.alertmanager" $ }} +{{ include "prometheus.namespace" $ | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role +{{- if (not $.Values.alertmanager.useExistingRole) }} + name: {{ template "prometheus.alertmanager.fullname" $ }} +{{- else }} + name: {{ $.Values.alertmanager.useExistingRole }} +{{- end }} +{{- end }} +{{ end }} diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/service.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/service.yaml new file mode 100644 index 0000000..9edc9ac --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/service.yaml @@ -0,0 +1,53 @@ +{{- if .Values.alertmanager.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.alertmanager.service.annotations }} + annotations: +{{ toYaml .Values.alertmanager.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- if .Values.alertmanager.service.labels }} +{{ toYaml .Values.alertmanager.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: +{{- if .Values.alertmanager.service.clusterIP }} + clusterIP: {{ .Values.alertmanager.service.clusterIP }} +{{- end }} +{{- if .Values.alertmanager.service.externalIPs }} + externalIPs: +{{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.alertmanager.service.servicePort }} + protocol: TCP + targetPort: 9093 + {{- if .Values.alertmanager.service.nodePort }} + nodePort: {{ .Values.alertmanager.service.nodePort }} + {{- end }} +{{- if .Values.alertmanager.service.enableMeshPeer }} + - name: meshpeer + port: 6783 + protocol: TCP + targetPort: 6783 +{{- end }} + selector: + {{- include "prometheus.alertmanager.matchLabels" . 
| nindent 4 }} +{{- if .Values.alertmanager.service.sessionAffinity }} + sessionAffinity: {{ .Values.alertmanager.service.sessionAffinity }} +{{- end }} + type: "{{ .Values.alertmanager.service.type }}" +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/serviceaccount.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/serviceaccount.yaml new file mode 100644 index 0000000..a5d996a --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.alertmanager.enabled .Values.serviceAccounts.alertmanager.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{ include "prometheus.namespace" . | indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.alertmanager.annotations | indent 4 }} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/alertmanager/sts.yaml b/deploy/dev/charts/prometheus/templates/alertmanager/sts.yaml new file mode 100644 index 0000000..1baef4b --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/alertmanager/sts.yaml @@ -0,0 +1,188 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: +{{- if .Values.alertmanager.statefulSet.annotations }} + annotations: + {{ toYaml .Values.alertmanager.statefulSet.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + {{- if .Values.alertmanager.statefulSet.labels}} + {{ toYaml .Values.alertmanager.statefulSet.labels | nindent 4 }} + {{- end}} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + serviceName: {{ template "prometheus.alertmanager.fullname" . }}-headless + selector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + replicas: {{ .Values.alertmanager.replicaCount }} + podManagementPolicy: {{ .Values.alertmanager.statefulSet.podManagementPolicy }} + template: + metadata: + {{- if .Values.alertmanager.podAnnotations }} + annotations: + {{ toYaml .Values.alertmanager.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 8 }} + {{- if .Values.alertmanager.podLabels}} + {{ toYaml .Values.alertmanager.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.alertmanager.affinity }} + affinity: +{{ toYaml .Values.alertmanager.affinity | indent 8 }} +{{- end }} +{{- if .Values.alertmanager.schedulerName }} + schedulerName: "{{ .Values.alertmanager.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{- if .Values.alertmanager.priorityClassName }} + priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }} + image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" + env: + {{- range $key, $value := .Values.alertmanager.extraEnv }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + args: + - --config.file=/etc/config/alertmanager.yml + - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} + {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - --cluster.advertise-address=[$(POD_IP)]:6783 + - --cluster.listen-address=0.0.0.0:6783 + {{- range $n := until (.Values.alertmanager.replicaCount | int) }} + - --cluster.peer={{ template "prometheus.alertmanager.fullname" $ }}-{{ $n }}.{{ template "prometheus.alertmanager.fullname" $ }}-headless:6783 + {{- end }} + {{- else }} + - --cluster.listen-address= + {{- end }} + {{- range $key, $value := .Values.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.alertmanager.baseURL }} + - --web.external-url={{ .Values.alertmanager.baseURL }} + {{- end }} + + ports: + - containerPort: 9093 + {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - containerPort: 6783 + {{- end }} + readinessProbe: + httpGet: + path: {{ .Values.alertmanager.prefixURL }}/#/status + port: 9093 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: +{{ toYaml .Values.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" + subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.configmapReload.alertmanager.enabled }} + - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.alertmanager.name }} + image: "{{ .Values.configmapReload.alertmanager.image.repository }}:{{ .Values.configmapReload.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.alertmanager.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://localhost:9093{{ .Values.alertmanager.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.configmapReload.alertmanager.port }} + ports: + - containerPort: {{ .Values.configmapReload.alertmanager.port }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.securityContext | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.tolerations }} + tolerations: +{{ toYaml .Values.alertmanager.tolerations | indent 8 }} + {{- end }} + volumes: + - name: config-volume + {{- if empty .Values.alertmanager.configFromSecret }} + configMap: + name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.alertmanager.configFromSecret }} + {{- end }} + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} +{{- if .Values.alertmanager.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + {{- if .Values.alertmanager.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 10 }} + {{- end }} + spec: + accessModes: +{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 10 }} + resources: + requests: + storage: "{{ .Values.alertmanager.persistentVolume.size }}" + {{- if .Values.server.persistentVolume.storageClass }} + {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: storage-volume + emptyDir: + {{- if .Values.alertmanager.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.alertmanager.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} +{{- end }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/node-exporter/daemonset.yaml b/deploy/dev/charts/prometheus/templates/node-exporter/daemonset.yaml new file mode 100644 index 0000000..d1d5cf0 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/node-exporter/daemonset.yaml @@ -0,0 +1,150 @@ +{{- if .Values.nodeExporter.enabled -}} +apiVersion: {{ template "prometheus.daemonset.apiVersion" . 
}} +kind: DaemonSet +metadata: +{{- if .Values.nodeExporter.deploymentAnnotations }} + annotations: +{{ toYaml .Values.nodeExporter.deploymentAnnotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + name: {{ template "prometheus.nodeExporter.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + matchLabels: + {{- include "prometheus.nodeExporter.matchLabels" . | nindent 6 }} + {{- if .Values.nodeExporter.updateStrategy }} + updateStrategy: +{{ toYaml .Values.nodeExporter.updateStrategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.nodeExporter.podAnnotations }} + annotations: +{{ toYaml .Values.nodeExporter.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 8 }} +{{- if .Values.nodeExporter.pod.labels }} +{{ toYaml .Values.nodeExporter.pod.labels | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ template "prometheus.serviceAccountName.nodeExporter" . }} + {{- if .Values.nodeExporter.extraInitContainers }} + initContainers: +{{ toYaml .Values.nodeExporter.extraInitContainers | indent 8 }} + {{- end }} +{{- if .Values.nodeExporter.priorityClassName }} + priorityClassName: "{{ .Values.nodeExporter.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.nodeExporter.name }} + image: "{{ .Values.nodeExporter.image.repository }}:{{ .Values.nodeExporter.image.tag }}" + imagePullPolicy: "{{ .Values.nodeExporter.image.pullPolicy }}" + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + {{- if .Values.nodeExporter.hostRootfs }} + - --path.rootfs=/host/root + {{- end }} + {{- if .Values.nodeExporter.hostNetwork }} + - --web.listen-address=:{{ .Values.nodeExporter.service.hostPort }} + {{- end }} + {{- range $key, $value := .Values.nodeExporter.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + ports: + - name: metrics + {{- if .Values.nodeExporter.hostNetwork }} + containerPort: {{ .Values.nodeExporter.service.hostPort }} + {{- else }} + containerPort: 9100 + {{- end }} + hostPort: {{ .Values.nodeExporter.service.hostPort }} + resources: +{{ toYaml .Values.nodeExporter.resources | indent 12 }} + {{- if .Values.nodeExporter.container.securityContext }} + securityContext: +{{ toYaml .Values.nodeExporter.container.securityContext | indent 12 }} + {{- end }} + volumeMounts: + - name: proc + mountPath: /host/proc + readOnly: true + - name: sys + mountPath: /host/sys + readOnly: true + {{- if .Values.nodeExporter.hostRootfs }} + - name: root + mountPath: /host/root + mountPropagation: HostToContainer + readOnly: true + {{- end }} + {{- range .Values.nodeExporter.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- if .mountPropagation }} + mountPropagation: {{ .mountPropagation }} + {{- end }} + {{- end }} + {{- range .Values.nodeExporter.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.nodeExporter.hostNetwork }} + hostNetwork: true + {{- end }} + {{- if .Values.nodeExporter.hostPID }} + hostPID: true + {{- end }} + {{- if .Values.nodeExporter.tolerations }} + tolerations: +{{ toYaml .Values.nodeExporter.tolerations | indent 8 }} + {{- end }} + 
{{- if .Values.nodeExporter.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeExporter.nodeSelector | indent 8 }} + {{- end }} + {{- with .Values.nodeExporter.dnsConfig }} + dnsConfig: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.nodeExporter.securityContext }} + securityContext: +{{ toYaml .Values.nodeExporter.securityContext | indent 8 }} + {{- end }} + volumes: + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys + {{- if .Values.nodeExporter.hostRootfs }} + - name: root + hostPath: + path: / + {{- end }} + {{- range .Values.nodeExporter.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.nodeExporter.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/node-exporter/psp.yaml b/deploy/dev/charts/prometheus/templates/node-exporter/psp.yaml new file mode 100644 index 0000000..bd9c73b --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/node-exporter/psp.yaml @@ -0,0 +1,55 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + annotations: +{{- if .Values.nodeExporter.podSecurityPolicy.annotations }} +{{ toYaml .Values.nodeExporter.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'hostPath' + - 'secret' + allowedHostPaths: + - pathPrefix: /proc + readOnly: true + - pathPrefix: /sys + readOnly: true + - pathPrefix: / + readOnly: true + {{- range .Values.nodeExporter.extraHostPathMounts }} + - pathPrefix: {{ .hostPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: {{ .Values.nodeExporter.hostNetwork }} + hostPID: {{ .Values.nodeExporter.hostPID }} + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + hostPorts: + - min: 1 + max: 65535 +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/node-exporter/role.yaml b/deploy/dev/charts/prometheus/templates/node-exporter/role.yaml new file mode 100644 index 0000000..d8ef3ed --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/node-exporter/role.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} +{{- if or (default .Values.nodeExporter.podSecurityPolicy.enabled false) (.Values.podSecurityPolicy.enabled) }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} +{{ include "prometheus.namespace" . | indent 2 }} +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "prometheus.nodeExporter.fullname" . 
}} +{{- end }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/node-exporter/rolebinding.yaml b/deploy/dev/charts/prometheus/templates/node-exporter/rolebinding.yaml new file mode 100644 index 0000000..06914b7 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/node-exporter/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} +{{ include "prometheus.namespace" . | indent 2 }} +roleRef: + kind: Role + name: {{ template "prometheus.nodeExporter.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} +{{ include "prometheus.namespace" . | indent 2 }} +{{- end }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/node-exporter/serviceaccount.yaml b/deploy/dev/charts/prometheus/templates/node-exporter/serviceaccount.yaml new file mode 100644 index 0000000..0cf91af --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/node-exporter/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.nodeExporter.enabled .Values.serviceAccounts.nodeExporter.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} +{{ include "prometheus.namespace" . | indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.nodeExporter.annotations | indent 4 }} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/node-exporter/svc.yaml b/deploy/dev/charts/prometheus/templates/node-exporter/svc.yaml new file mode 100644 index 0000000..26d1eaa --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/node-exporter/svc.yaml @@ -0,0 +1,47 @@ +{{- if .Values.nodeExporter.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.nodeExporter.service.annotations }} + annotations: +{{ toYaml .Values.nodeExporter.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} +{{- if .Values.nodeExporter.service.labels }} +{{ toYaml .Values.nodeExporter.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.nodeExporter.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: +{{- if .Values.nodeExporter.service.clusterIP }} + clusterIP: {{ .Values.nodeExporter.service.clusterIP }} +{{- end }} +{{- if .Values.nodeExporter.service.externalIPs }} + externalIPs: +{{ toYaml .Values.nodeExporter.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.nodeExporter.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.nodeExporter.service.loadBalancerIP }} +{{- end }} +{{- if .Values.nodeExporter.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.nodeExporter.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: metrics + {{- if .Values.nodeExporter.hostNetwork }} + port: {{ .Values.nodeExporter.service.hostPort }} + protocol: TCP + targetPort: {{ .Values.nodeExporter.service.hostPort }} + {{- else }} + port: {{ .Values.nodeExporter.service.servicePort }} + protocol: TCP + targetPort: 9100 + {{- end }} + selector: + {{- include "prometheus.nodeExporter.matchLabels" . | nindent 4 }} + type: "{{ .Values.nodeExporter.service.type }}" +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/pushgateway/clusterrole.yaml b/deploy/dev/charts/prometheus/templates/pushgateway/clusterrole.yaml new file mode 100644 index 0000000..76ecf05 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/pushgateway/clusterrole.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.pushgateway.fullname" . }} +{{- else }} + [] +{{- end }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml b/deploy/dev/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml new file mode 100644 index 0000000..15770ee --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.pushgateway" . }} +{{ include "prometheus.namespace" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.pushgateway.fullname" . }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/pushgateway/deploy.yaml b/deploy/dev/charts/prometheus/templates/pushgateway/deploy.yaml new file mode 100644 index 0000000..ffdbfcc --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/pushgateway/deploy.yaml @@ -0,0 +1,119 @@ +{{- if .Values.pushgateway.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.pushgateway.deploymentAnnotations }} + annotations: + {{ toYaml .Values.pushgateway.deploymentAnnotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: + selector: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + matchLabels: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }} + replicas: {{ .Values.pushgateway.replicaCount }} + {{- if .Values.pushgateway.strategy }} + strategy: +{{ toYaml .Values.pushgateway.strategy | trim | indent 4 }} + {{ if eq .Values.pushgateway.strategy.type "Recreate" }}rollingUpdate: null{{ end }} +{{- end }} + template: + metadata: + {{- if .Values.pushgateway.podAnnotations }} + annotations: + {{ toYaml .Values.pushgateway.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 8 }} + {{- if .Values.pushgateway.podLabels }} + {{ toYaml .Values.pushgateway.podLabels | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "prometheus.serviceAccountName.pushgateway" . }} + {{- if .Values.pushgateway.extraInitContainers }} + initContainers: +{{ toYaml .Values.pushgateway.extraInitContainers | indent 8 }} + {{- end }} +{{- if .Values.pushgateway.priorityClassName }} + priorityClassName: "{{ .Values.pushgateway.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.pushgateway.name }} + image: "{{ .Values.pushgateway.image.repository }}:{{ .Values.pushgateway.image.tag }}" + imagePullPolicy: "{{ .Values.pushgateway.image.pullPolicy }}" + args: + {{- range $key, $value := .Values.pushgateway.extraArgs }} + {{- $stringvalue := toString $value }} + {{- if eq $stringvalue "true" }} + - --{{ $key }} + {{- else }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + ports: + - containerPort: 9091 + livenessProbe: + httpGet: + {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} + path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/healthy + {{- else }} + path: /-/healthy + {{- end }} + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + readinessProbe: + httpGet: + {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} + path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/ready + {{- else }} + path: /-/ready + {{- end }} + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + resources: +{{ toYaml .Values.pushgateway.resources | indent 12 }} + {{- if .Values.pushgateway.persistentVolume.enabled }} + volumeMounts: + - name: storage-volume + mountPath: "{{ .Values.pushgateway.persistentVolume.mountPath }}" + subPath: "{{ .Values.pushgateway.persistentVolume.subPath }}" + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.nodeSelector }} + nodeSelector: +{{ toYaml .Values.pushgateway.nodeSelector | indent 8 }} + {{- end }} + {{- with .Values.pushgateway.dnsConfig }} + dnsConfig: +{{ toYaml . 
| indent 8 }} + {{- end }} + {{- if .Values.pushgateway.securityContext }} + securityContext: +{{ toYaml .Values.pushgateway.securityContext | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.tolerations }} + tolerations: +{{ toYaml .Values.pushgateway.tolerations | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.affinity }} + affinity: +{{ toYaml .Values.pushgateway.affinity | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.persistentVolume.enabled }} + volumes: + - name: storage-volume + persistentVolumeClaim: + claimName: {{ if .Values.pushgateway.persistentVolume.existingClaim }}{{ .Values.pushgateway.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.pushgateway.fullname" . }}{{- end }} + {{- end -}} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/pushgateway/ingress.yaml b/deploy/dev/charts/prometheus/templates/pushgateway/ingress.yaml new file mode 100644 index 0000000..2ff72ab --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/pushgateway/ingress.yaml @@ -0,0 +1,54 @@ +{{- if and .Values.pushgateway.enabled .Values.pushgateway.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.pushgateway.fullname" . }} +{{- $servicePort := .Values.pushgateway.service.servicePort -}} +{{- $ingressPath := .Values.pushgateway.ingress.path -}} +{{- $ingressPathType := .Values.pushgateway.ingress.pathType -}} +{{- $extraPaths := .Values.pushgateway.ingress.extraPaths -}} +apiVersion: {{ template "ingress.apiVersion" . }} +kind: Ingress +metadata: +{{- if .Values.pushgateway.ingress.annotations }} + annotations: +{{ toYaml .Values.pushgateway.ingress.annotations | indent 4}} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.pushgateway.ingress.ingressClassName }} + ingressClassName: {{ .Values.pushgateway.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.pushgateway.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} +{{- if .Values.pushgateway.ingress.tls }} + tls: +{{ toYaml .Values.pushgateway.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/deploy/dev/charts/prometheus/templates/pushgateway/netpol.yaml b/deploy/dev/charts/prometheus/templates/pushgateway/netpol.yaml new file mode 100644 index 0000000..c8d1fb3 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/pushgateway/netpol.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.pushgateway.enabled .Values.networkPolicy.enabled -}} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . 
}} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }} + ingress: + - from: + - podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 12 }} + - ports: + - port: 9091 +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/pushgateway/pdb.yaml b/deploy/dev/charts/prometheus/templates/pushgateway/pdb.yaml new file mode 100644 index 0000000..50beb48 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/pushgateway/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.pushgateway.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.pushgateway.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.pushgateway.labels" . | nindent 6 }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/pushgateway/psp.yaml b/deploy/dev/charts/prometheus/templates/pushgateway/psp.yaml new file mode 100644 index 0000000..1ca3267 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/pushgateway/psp.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.pushgateway.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + annotations: +{{- if .Values.pushgateway.podSecurityPolicy.annotations }} +{{ toYaml .Values.pushgateway.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'persistentVolumeClaim' + - 'secret' + allowedHostPaths: + - pathPrefix: {{ .Values.pushgateway.persistentVolume.mountPath }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: true +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/pushgateway/pvc.yaml b/deploy/dev/charts/prometheus/templates/pushgateway/pvc.yaml new file mode 100644 index 0000000..d5d64dd --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/pushgateway/pvc.yaml @@ -0,0 +1,31 @@ +{{- if .Values.pushgateway.persistentVolume.enabled -}} +{{- if not .Values.pushgateway.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.pushgateway.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.pushgateway.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: + accessModes: +{{ toYaml .Values.pushgateway.persistentVolume.accessModes | indent 4 }} +{{- if .Values.pushgateway.persistentVolume.storageClass }} +{{- if (eq "-" .Values.pushgateway.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.pushgateway.persistentVolume.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.pushgateway.persistentVolume.volumeBindingMode }} + volumeBindingMode: "{{ .Values.pushgateway.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.pushgateway.persistentVolume.size }}" +{{- end -}} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/pushgateway/service.yaml b/deploy/dev/charts/prometheus/templates/pushgateway/service.yaml new file mode 100644 index 0000000..f05f17c --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/pushgateway/service.yaml @@ -0,0 +1,41 @@ +{{- if .Values.pushgateway.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.pushgateway.service.annotations }} + annotations: +{{ toYaml .Values.pushgateway.service.annotations | indent 4}} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +{{- if .Values.pushgateway.service.labels }} +{{ toYaml .Values.pushgateway.service.labels | indent 4}} +{{- end }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: +{{- if .Values.pushgateway.service.clusterIP }} + clusterIP: {{ .Values.pushgateway.service.clusterIP }} +{{- end }} +{{- if .Values.pushgateway.service.externalIPs }} + externalIPs: +{{ toYaml .Values.pushgateway.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.pushgateway.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.pushgateway.service.loadBalancerIP }} +{{- end }} +{{- if .Values.pushgateway.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.pushgateway.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.pushgateway.service.servicePort }} + protocol: TCP + targetPort: 9091 + selector: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 4 }} + type: "{{ .Values.pushgateway.service.type }}" +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/pushgateway/serviceaccount.yaml b/deploy/dev/charts/prometheus/templates/pushgateway/serviceaccount.yaml new file mode 100644 index 0000000..8c0b876 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/pushgateway/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.pushgateway.enabled .Values.serviceAccounts.pushgateway.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.pushgateway" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.pushgateway.annotations | indent 4 }} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/pushgateway/vpa.yaml b/deploy/dev/charts/prometheus/templates/pushgateway/vpa.yaml new file mode 100644 index 0000000..0ac54f9 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/pushgateway/vpa.yaml @@ -0,0 +1,20 @@ +{{- if .Values.pushgateway.enabled -}} +{{- if .Values.pushgateway.verticalAutoscaler.enabled -}} +apiVersion: autoscaling.k8s.io/v1beta2 +kind: VerticalPodAutoscaler +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }}-vpa +{{ include "prometheus.namespace" . | indent 2 }} +spec: + targetRef: + apiVersion: "apps/v1" + kind: Deployment + name: {{ template "prometheus.pushgateway.fullname" . }} + updatePolicy: + updateMode: {{ .Values.pushgateway.verticalAutoscaler.updateMode | default "Off" | quote }} + resourcePolicy: + containerPolicies: {{ .Values.pushgateway.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} +{{- end -}} {{/* if .Values.pushgateway.verticalAutoscaler.enabled */}} +{{- end -}} {{/* .Values.pushgateway.enabled */}} diff --git a/deploy/dev/charts/prometheus/templates/server/clusterrole.yaml b/deploy/dev/charts/prometheus/templates/server/clusterrole.yaml new file mode 100644 index 0000000..2520235 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/clusterrole.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.server.enabled .Values.rbac.create (empty .Values.server.useExistingClusterRoleName) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.server.fullname" . }} +{{- end }} + - apiGroups: + - "" + resources: + - nodes + - nodes/proxy + - nodes/metrics + - services + - endpoints + - pods + - ingresses + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + - "networking.k8s.io" + resources: + - ingresses/status + - ingresses + verbs: + - get + - list + - watch + - nonResourceURLs: + - "/metrics" + verbs: + - get +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/server/clusterrolebinding.yaml b/deploy/dev/charts/prometheus/templates/server/clusterrolebinding.yaml new file mode 100644 index 0000000..5a79611 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.server.enabled .Values.rbac.create (empty .Values.server.namespaces) (empty .Values.server.useExistingClusterRoleName) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" . }} +{{ include "prometheus.namespace" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.server.fullname" . 
}} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/server/cm.yaml b/deploy/dev/charts/prometheus/templates/server/cm.yaml new file mode 100644 index 0000000..a0a813a --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/cm.yaml @@ -0,0 +1,85 @@ +{{- if .Values.server.enabled -}} +{{- if (empty .Values.server.configMapOverrideName) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +data: +{{- $root := . -}} +{{- range $key, $value := .Values.ruleFiles }} + {{ $key }}: {{- toYaml $value | indent 2 }} +{{- end }} +{{- range $key, $value := .Values.serverFiles }} + {{ $key }}: | +{{- if eq $key "prometheus.yml" }} + global: +{{ $root.Values.server.global | toYaml | trimSuffix "\n" | indent 6 }} +{{- if $root.Values.server.remoteWrite }} + remote_write: +{{ $root.Values.server.remoteWrite | toYaml | indent 4 }} +{{- end }} +{{- if $root.Values.server.remoteRead }} + remote_read: +{{ $root.Values.server.remoteRead | toYaml | indent 4 }} +{{- end }} +{{- end }} +{{- if eq $key "alerts" }} +{{- if and (not (empty $value)) (empty $value.groups) }} + groups: +{{- range $ruleKey, $ruleValue := $value }} + - name: {{ $ruleKey -}}.rules + rules: +{{ $ruleValue | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} +{{- else }} +{{ toYaml $value | indent 4 }} +{{- end }} +{{- else }} +{{ toYaml $value | default "{}" | indent 4 }} +{{- end }} +{{- if eq $key "prometheus.yml" -}} +{{- if $root.Values.extraScrapeConfigs }} +{{ tpl $root.Values.extraScrapeConfigs $root | indent 4 }} +{{- end -}} +{{- if or ($root.Values.alertmanager.enabled) ($root.Values.server.alertmanagers) }} + alerting: +{{- if $root.Values.alertRelabelConfigs }} +{{ $root.Values.alertRelabelConfigs | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} + alertmanagers: +{{- if $root.Values.server.alertmanagers }} +{{ toYaml $root.Values.server.alertmanagers | indent 8 }} +{{- else }} + - kubernetes_sd_configs: + - role: pod + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if $root.Values.alertmanager.prefixURL }} + path_prefix: {{ $root.Values.alertmanager.prefixURL }} + {{- end }} + relabel_configs: + - source_labels: [__meta_kubernetes_namespace] + regex: {{ $root.Release.Namespace }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_app] + regex: {{ template "prometheus.name" $root }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_component] + regex: alertmanager + action: keep + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_probe] + regex: {{ index $root.Values.alertmanager.podAnnotations "prometheus.io/probe" | default ".*" }} + action: keep + - source_labels: [__meta_kubernetes_pod_container_port_number] + regex: "9093" + action: keep +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/server/deploy.yaml b/deploy/dev/charts/prometheus/templates/server/deploy.yaml new file mode 100644 index 0000000..eb86e90 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/deploy.yaml @@ -0,0 +1,324 @@ +{{- if .Values.server.enabled -}} +{{- if not .Values.server.statefulSet.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . 
}} +kind: Deployment +metadata: +{{- if .Values.server.deploymentAnnotations }} + annotations: + {{ toYaml .Values.server.deploymentAnnotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + {{- if .Values.server.strategy }} + strategy: +{{ toYaml .Values.server.strategy | trim | indent 4 }} + {{ if eq .Values.server.strategy.type "Recreate" }}rollingUpdate: null{{ end }} +{{- end }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: + {{ toYaml .Values.server.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} +{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} + {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} + enableServiceLinks: true + {{- else }} + enableServiceLinks: false + {{- end }} +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --volume-dir={{ . }} + {{- end }} + {{- if .Values.configmapReload.prometheus.containerPort }} + ports: + - containerPort: {{ .Values.configmapReload.prometheus.containerPort }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + + - name: {{ template "prometheus.name" . 
}}-{{ .Values.server.name }} + image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}" + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.defaultFlagsOverride }} + {{ toYaml .Values.server.defaultFlagsOverride | nindent 12}} + {{- else }} + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + {{- if .Values.server.storagePath }} + - --storage.tsdb.path={{ .Values.server.storagePath }} + {{- else }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + {{- end }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . }} + {{- end }} + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.server.prefixURL }} + - --web.route-prefix={{ .Values.server.prefixURL }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + {{- end }} + ports: + - containerPort: 9090 + {{- if .Values.server.hostPort }} + hostPort: {{ .Values.server.hostPort }} + {{- end }} + readinessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- if .Values.server.probeHeaders }} + httpHeaders: + {{- range .Values.server.probeHeaders}} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} + successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} + livenessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- if .Values.server.probeHeaders }} + httpHeaders: + {{- range .Values.server.probeHeaders}} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} + successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} + {{- if .Values.server.startupProbe.enabled }} + startupProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- if .Values.server.probeHeaders }} + httpHeaders: + {{- range .Values.server.probeHeaders}} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + failureThreshold: {{ .Values.server.startupProbe.failureThreshold }} + periodSeconds: {{ .Values.server.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.server.startupProbe.timeoutSeconds }} + {{- end }} + resources: +{{ 
toYaml .Values.server.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.server.containerSecurityContext }} + securityContext: + {{- toYaml .Values.server.containerSecurityContext | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- range $name, $spec := .Values.server.sidecarContainers }} + - name: {{ $name }} + {{- if kindIs "string" $spec }} + {{- tpl $spec $ | nindent 10 }} + {{- else }} + {{- toYaml $spec | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + hostNetwork: {{ .Values.server.hostNetwork }} + {{- if .Values.server.dnsPolicy }} + dnsPolicy: {{ .Values.server.dnsPolicy }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.hostAliases }} + hostAliases: +{{ toYaml .Values.server.hostAliases | indent 8 }} + {{- end }} + {{- if .Values.server.dnsConfig }} + dnsConfig: +{{ toYaml .Values.server.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.server.securityContext }} + securityContext: +{{ toYaml .Values.server.securityContext | indent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + {{- if empty .Values.server.configFromSecret }} + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.server.configFromSecret }} + {{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . 
}} + {{- end }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} + - name: storage-volume + {{- if .Values.server.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + emptyDir: + {{- if .Values.server.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/server/headless-svc.yaml b/deploy/dev/charts/prometheus/templates/server/headless-svc.yaml new file mode 100644 index 0000000..d519f4e --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/headless-svc.yaml @@ -0,0 +1,37 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.statefulSet.headless.annotations }} + annotations: +{{ toYaml .Values.server.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.statefulSet.headless.labels }} +{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }}-headless +{{ include "prometheus.namespace" . | indent 2 }} +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.server.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.statefulSet.headless.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.statefulSet.headless.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- if .Values.server.statefulSet.headless.gRPC.nodePort }} + nodePort: {{ .Values.server.statefulSet.headless.gRPC.nodePort }} + {{- end }} + {{- end }} + + selector: + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- end -}} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/server/ingress.yaml b/deploy/dev/charts/prometheus/templates/server/ingress.yaml new file mode 100644 index 0000000..000f39c --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/ingress.yaml @@ -0,0 +1,59 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.server.fullname" . }} +{{- $servicePort := .Values.server.service.servicePort -}} +{{- $ingressPath := .Values.server.ingress.path -}} +{{- $ingressPathType := .Values.server.ingress.pathType -}} +{{- $extraPaths := .Values.server.ingress.extraPaths -}} +apiVersion: {{ template "ingress.apiVersion" . }} +kind: Ingress +metadata: +{{- if .Values.server.ingress.annotations }} + annotations: +{{ toYaml .Values.server.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . 
| nindent 4 }} +{{- range $key, $value := .Values.server.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.server.ingress.ingressClassName }} + ingressClassName: {{ .Values.server.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.server.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} +{{- if .Values.server.ingress.tls }} + tls: +{{ toYaml .Values.server.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/server/netpol.yaml b/deploy/dev/charts/prometheus/templates/server/netpol.yaml new file mode 100644 index 0000000..c8870e9 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/netpol.yaml @@ -0,0 +1,18 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + ingress: + - ports: + - port: 9090 +{{- end }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/server/pdb.yaml b/deploy/dev/charts/prometheus/templates/server/pdb.yaml new file mode 100644 index 0000000..364cb5b --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.server.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.server.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.server.labels" . | nindent 6 }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/server/psp.yaml b/deploy/dev/charts/prometheus/templates/server/psp.yaml new file mode 100644 index 0000000..e2b885f --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/psp.yaml @@ -0,0 +1,51 @@ +{{- if and .Values.server.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} + labels: + {{- include "prometheus.server.labels" . 
| nindent 4 }} + annotations: +{{- if .Values.server.podSecurityPolicy.annotations }} +{{ toYaml .Values.server.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + allowedCapabilities: + - 'CHOWN' + volumes: + - 'configMap' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'secret' + - 'hostPath' + allowedHostPaths: + - pathPrefix: /etc + readOnly: true + - pathPrefix: {{ .Values.server.persistentVolume.mountPath }} + {{- range .Values.server.extraHostPathMounts }} + - pathPrefix: {{ .hostPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/server/pvc.yaml b/deploy/dev/charts/prometheus/templates/server/pvc.yaml new file mode 100644 index 0000000..a735536 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/pvc.yaml @@ -0,0 +1,39 @@ +{{- if .Values.server.enabled -}} +{{- if not .Values.server.statefulSet.enabled -}} +{{- if .Values.server.persistentVolume.enabled -}} +{{- if not .Values.server.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }} +{{- if .Values.server.persistentVolume.storageClass }} +{{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.server.persistentVolume.volumeBindingMode }} + volumeBindingMode: "{{ .Values.server.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" +{{- if .Values.server.persistentVolume.selector }} + selector: + {{- toYaml .Values.server.persistentVolume.selector | nindent 4 }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/server/rolebinding.yaml b/deploy/dev/charts/prometheus/templates/server/rolebinding.yaml new file mode 100644 index 0000000..93ce3ee --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.server.enabled .Values.rbac.create .Values.server.useExistingClusterRoleName .Values.server.namespaces -}} +{{ range $.Values.server.namespaces -}} +--- +apiVersion: {{ template "rbac.apiVersion" $ }} +kind: RoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" $ | nindent 4 }} + name: {{ template "prometheus.server.fullname" $ }} + namespace: {{ . 
}} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" $ }} +{{ include "prometheus.namespace" $ | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ $.Values.server.useExistingClusterRoleName }} +{{ end -}} +{{ end -}} diff --git a/deploy/dev/charts/prometheus/templates/server/service.yaml b/deploy/dev/charts/prometheus/templates/server/service.yaml new file mode 100644 index 0000000..01c5a4a --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/service.yaml @@ -0,0 +1,60 @@ +{{- if and .Values.server.enabled .Values.server.service.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.service.annotations }} + annotations: +{{ toYaml .Values.server.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.service.labels }} +{{ toYaml .Values.server.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: +{{- if .Values.server.service.clusterIP }} + clusterIP: {{ .Values.server.service.clusterIP }} +{{- end }} +{{- if .Values.server.service.externalIPs }} + externalIPs: +{{ toYaml .Values.server.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.server.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.server.service.loadBalancerIP }} +{{- end }} +{{- if .Values.server.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.server.service.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.service.nodePort }} + nodePort: {{ .Values.server.service.nodePort }} + {{- end }} + {{- if .Values.server.service.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.service.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- if .Values.server.service.gRPC.nodePort }} + nodePort: {{ .Values.server.service.gRPC.nodePort }} + {{- end }} + {{- end }} + selector: + {{- if and .Values.server.statefulSet.enabled .Values.server.service.statefulsetReplica.enabled }} + statefulset.kubernetes.io/pod-name: {{ template "prometheus.server.fullname" . }}-{{ .Values.server.service.statefulsetReplica.replica }} + {{- else -}} + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- if .Values.server.service.sessionAffinity }} + sessionAffinity: {{ .Values.server.service.sessionAffinity }} +{{- end }} + {{- end }} + type: "{{ .Values.server.service.type }}" +{{- end -}} diff --git a/deploy/dev/charts/prometheus/templates/server/serviceaccount.yaml b/deploy/dev/charts/prometheus/templates/server/serviceaccount.yaml new file mode 100644 index 0000000..9c0502a --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.serviceAccounts.server.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.server" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.server.annotations | indent 4 }} +{{- end }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/server/sts.yaml b/deploy/dev/charts/prometheus/templates/server/sts.yaml new file mode 100644 index 0000000..24a9d92 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/sts.yaml @@ -0,0 +1,302 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: +{{- if .Values.server.statefulSet.annotations }} + annotations: + {{ toYaml .Values.server.statefulSet.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- if .Values.server.statefulSet.labels}} + {{ toYaml .Values.server.statefulSet.labels | nindent 4 }} + {{- end}} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + serviceName: {{ template "prometheus.server.fullname" . }}-headless + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + podManagementPolicy: {{ .Values.server.statefulSet.podManagementPolicy }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: + {{ toYaml .Values.server.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} +{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} + {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} + enableServiceLinks: true + {{- else }} + enableServiceLinks: false + {{- end }} +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --volume-dir={{ . 
}} + {{- end }} + {{- if .Values.configmapReload.prometheus.containerPort }} + ports: + - containerPort: {{ .Values.configmapReload.prometheus.containerPort }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} + image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}" + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.defaultFlagsOverride }} + {{ toYaml .Values.server.defaultFlagsOverride | nindent 12}} + {{- else }} + {{- if .Values.server.prefixURL }} + - --web.route-prefix={{ .Values.server.prefixURL }} + {{- end }} + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + {{- if .Values.server.storagePath }} + - --storage.tsdb.path={{ .Values.server.storagePath }} + {{- else }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + {{- end }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . }} + {{- end }} + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + {{- end }} + ports: + - containerPort: 9090 + {{- if .Values.server.hostPort }} + hostPort: {{ .Values.server.hostPort }} + {{- end }} + readinessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} + successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} + livenessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} + successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} + resources: +{{ toYaml .Values.server.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: 
{{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- range $name, $spec := .Values.server.sidecarContainers }} + - name: {{ $name }} + {{- if kindIs "string" $spec }} + {{- tpl $spec $ | nindent 10 }} + {{- else }} + {{- toYaml $spec | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + hostNetwork: {{ .Values.server.hostNetwork }} + {{- if .Values.server.dnsPolicy }} + dnsPolicy: {{ .Values.server.dnsPolicy }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.hostAliases }} + hostAliases: +{{ toYaml .Values.server.hostAliases | indent 8 }} + {{- end }} + {{- if .Values.server.dnsConfig }} + dnsConfig: +{{ toYaml .Values.server.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.server.securityContext }} + securityContext: +{{ toYaml .Values.server.securityContext | indent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + {{- if empty .Values.server.configFromSecret }} + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.server.configFromSecret }} + {{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- with .optional }} + optional: {{ . 
}} + {{- end }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} +{{- if .Values.server.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 10 }} + {{- end }} + spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 10 }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" + {{- if .Values.server.persistentVolume.storageClass }} + {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: storage-volume + emptyDir: + {{- if .Values.server.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} +{{- end }} +{{- end }} +{{- end }} diff --git a/deploy/dev/charts/prometheus/templates/server/vpa.yaml b/deploy/dev/charts/prometheus/templates/server/vpa.yaml new file mode 100644 index 0000000..981a9b4 --- /dev/null +++ b/deploy/dev/charts/prometheus/templates/server/vpa.yaml @@ -0,0 +1,24 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.verticalAutoscaler.enabled -}} +apiVersion: autoscaling.k8s.io/v1beta2 +kind: VerticalPodAutoscaler +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }}-vpa +{{ include "prometheus.namespace" . | indent 2 }} +spec: + targetRef: + apiVersion: "apps/v1" +{{- if .Values.server.statefulSet.enabled }} + kind: StatefulSet +{{- else }} + kind: Deployment +{{- end }} + name: {{ template "prometheus.server.fullname" . }} + updatePolicy: + updateMode: {{ .Values.server.verticalAutoscaler.updateMode | default "Off" | quote }} + resourcePolicy: + containerPolicies: {{ .Values.server.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} +{{- end -}} {{/* if .Values.server.verticalAutoscaler.enabled */}} +{{- end -}} {{/* .Values.server.enabled */}} diff --git a/deploy/dev/charts/prometheus/values.yaml b/deploy/dev/charts/prometheus/values.yaml new file mode 100644 index 0000000..64324eb --- /dev/null +++ b/deploy/dev/charts/prometheus/values.yaml @@ -0,0 +1,1840 @@ +rbac: + create: true + +podSecurityPolicy: + enabled: false + +imagePullSecrets: +# - name: "image-pull-secret" + +## Define serviceAccount names for components. Defaults to component's fully qualified name. +## +serviceAccounts: + alertmanager: + create: true + name: + annotations: {} + nodeExporter: + create: true + name: + annotations: {} + pushgateway: + create: true + name: + annotations: {} + server: + create: true + name: + annotations: {} + +alertmanager: + ## If false, alertmanager will not be installed + ## + enabled: true + + ## Use a ClusterRole (and ClusterRoleBinding) + ## - If set to false - we define a Role and RoleBinding in the defined namespaces ONLY + ## This makes alertmanager work - for users who do not have ClusterAdmin privs, but wants alertmanager to operate on their own namespaces, instead of clusterwide. + useClusterRole: true + + ## Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. 
+ useExistingRole: false + + ## alertmanager container name + ## + name: alertmanager + + ## alertmanager container image + ## + image: + repository: quay.io/prometheus/alertmanager + tag: v0.23.0 + pullPolicy: IfNotPresent + + ## alertmanager priorityClassName + ## + priorityClassName: "" + + ## Custom HTTP headers for Readiness Probe + ## + ## Useful for providing HTTP Basic Auth to healthchecks + probeHeaders: [] + + ## Additional alertmanager container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access alertmanager + baseURL: "http://localhost:9093" + + ## Additional alertmanager container environment variable + ## For instance to add a http_proxy + ## + extraEnv: {} + + ## Additional alertmanager Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. + extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: alertmanager-secret-files + # readOnly: true + + ## Additional alertmanager Configmap mounts + extraConfigmapMounts: [] + # - name: template-files + # mountPath: /etc/config/templates.d + # configMap: alertmanager-template-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config + ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configFromSecret: "" + + ## The configuration file name to be loaded to alertmanager + ## Must match the key within configuration loaded from ConfigMap/Secret + ## + configFileName: alertmanager.yml + + ingress: + ## If true, alertmanager Ingress will be created + ## + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + ## alertmanager Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## alertmanager Ingress additional labels + ## + extraLabels: {} + + ## alertmanager Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - alertmanager.domain.com + # - domain.com/alertmanager + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## alertmanager Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - alertmanager.domain.com + + ## Alertmanager Deployment Strategy type + # strategy: + # type: Recreate + + ## Node tolerations for alertmanager scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for alertmanager pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, alertmanager will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## alertmanager data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## alertmanager data Persistent Volume Claim annotations + ## + annotations: {} + + ## alertmanager data Persistent Volume existing claim name + ## Requires alertmanager.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## alertmanager data Persistent Volume mount root path + ## + mountPath: /data + + ## alertmanager data Persistent Volume size + ## + size: 2Gi + + ## alertmanager data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## alertmanager data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. 
+ ## + # volumeBindingMode: "" + + ## Subdirectory of alertmanager data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Persistent Volume Claim Selector + ## Useful if Persistent Volumes have been provisioned in advance + ## Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + # selector: + # matchLabels: + # release: "stable" + # matchExpressions: + # - { key: environment, operator: In, values: [ dev ] } + + emptyDir: + ## alertmanager emptyDir volume size limit + ## + sizeLimit: "" + + ## Annotations to be added to alertmanager pods + ## + podAnnotations: {} + ## Tell prometheus to use a specific set of alertmanager pods + ## instead of all alertmanager pods found in the same namespace + ## Useful if you deploy multiple releases within the same namespace + ## + ## prometheus.io/probe: alertmanager-teamA + + ## Labels to be added to Prometheus AlertManager pods + ## + podLabels: {} + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. 
+ ## This allows to scale replicas to more than 1 pod + ## + enabled: false + + annotations: {} + labels: {} + podManagementPolicy: OrderedReady + + ## Alertmanager headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + + ## Enabling peer mesh service end points for enabling the HA alert manager + ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md + enableMeshPeer: false + + servicePort: 80 + + ## alertmanager resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + # Custom DNS configuration to be added to alertmanager pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to alertmanager pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Enabling peer mesh service end points for enabling the HA alert manager + ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md + # enableMeshPeer : true + + ## List of IP addresses at which the alertmanager service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + # nodePort: 30000 + sessionAffinity: None + type: ClusterIP + + ## List of initial peers + ## Ref: https://github.com/prometheus/alertmanager/blob/main/README.md#high-availability + clusterPeers: [] + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/jimmidyson/configmap-reload +## +configmapReload: + prometheus: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.5.0 + pullPolicy: IfNotPresent + + # containerPort: 9533 + + ## Additional configmap-reload container arguments + ## + extraArgs: {} + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + + + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true + + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + alertmanager: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.5.0 + pullPolicy: IfNotPresent + + # containerPort: 9533 + + ## Additional configmap-reload container arguments + ## + extraArgs: {} + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + + + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true + + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## 
+ resources: {} + +kubeStateMetrics: + ## If false, kube-state-metrics sub-chart will not be installed + ## + enabled: true + +## kube-state-metrics sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics +## +# kube-state-metrics: + +nodeExporter: + ## If false, node-exporter will not be installed + ## + enabled: true + + ## If true, node-exporter pods share the host network namespace + ## + hostNetwork: true + + ## If true, node-exporter pods share the host PID namespace + ## + hostPID: true + + ## If true, node-exporter pods mounts host / at /host/root + ## + hostRootfs: true + + ## node-exporter container name + ## + name: node-exporter + + ## node-exporter container image + ## + image: + repository: quay.io/prometheus/node-exporter + tag: v1.3.0 + pullPolicy: IfNotPresent + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## node-exporter priorityClassName + ## + priorityClassName: "" + + ## Custom Update Strategy + ## + updateStrategy: + type: RollingUpdate + + ## Additional node-exporter container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## Additional node-exporter hostPath mounts + ## + extraHostPathMounts: [] + # - name: textfile-dir + # mountPath: /srv/txt_collector + # hostPath: /var/lib/node-exporter + # readOnly: true + # mountPropagation: HostToContainer + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # configMap: certs-configmap + # readOnly: true + + ## Node tolerations for node-exporter scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for node-exporter pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to node-exporter pods + ## + podAnnotations: {} + + ## Labels to be added to node-exporter pods + ## + pod: + labels: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## node-exporter resource limits & requests + ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + container: + securityContext: + allowPrivilegeEscalation: false + # Custom DNS configuration to be added to node-exporter pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security 
context to be added to node-exporter pods + ## + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + + service: + annotations: + prometheus.io/scrape: "true" + labels: {} + + # Exposed as a headless service: + # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services + clusterIP: None + + ## List of IP addresses at which the node-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + hostPort: 9100 + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9100 + type: ClusterIP + +server: + ## Prometheus server container name + ## + enabled: true + + ## Use a ClusterRole (and ClusterRoleBinding) + ## - If set to false - we define a RoleBinding in the defined namespaces ONLY + ## + ## NB: because we need a Role with nonResourceURL's ("/metrics") - you must get someone with Cluster-admin privileges to define this role for you, before running with this setting enabled. + ## This makes prometheus work - for users who do not have ClusterAdmin privs, but wants prometheus to operate on their own namespaces, instead of clusterwide. + ## + ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus. + ## + # useExistingClusterRoleName: nameofclusterrole + + ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges. + # namespaces: + # - yournamespace + + name: server + + # sidecarContainers - add more containers to prometheus server + # Key/Value where Key is the sidecar `- name: ` + # Example: + # sidecarContainers: + # webserver: + # image: nginx + sidecarContainers: {} + + # sidecarTemplateValues - context to be used in template for sidecarContainers + # Example: + # sidecarTemplateValues: *your-custom-globals + # sidecarContainers: + # webserver: |- + # {{ include "webserver-container-template" . }} + # Template for `webserver-container-template` might looks like this: + # image: "{{ .Values.server.sidecarTemplateValues.repository }}:{{ .Values.server.sidecarTemplateValues.tag }}" + # ... + # + sidecarTemplateValues: {} + + ## Prometheus server container image + ## + image: + repository: quay.io/prometheus/prometheus + tag: v2.34.0 + pullPolicy: IfNotPresent + + ## prometheus server priorityClassName + ## + priorityClassName: "" + + ## EnableServiceLinks indicates whether information about services should be injected + ## into pod's environment variables, matching the syntax of Docker links. + ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0. + ## + enableServiceLinks: true + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access prometheus + ## Maybe same with Ingress host name + baseURL: "" + + ## Additional server container environment variables + ## + ## You specify this manually like you would a raw deployment manifest. + ## This means you can bind in environment variables from secrets. + ## + ## e.g. static environment variable: + ## - name: DEMO_GREETING + ## value: "Hello from the environment" + ## + ## e.g. 
secret environment variable: + ## - name: USERNAME + ## valueFrom: + ## secretKeyRef: + ## name: mysecret + ## key: username + env: [] + + # List of flags to override default parameters, e.g: + # - --enable-feature=agent + # - --storage.agent.retention.max-time=30m + defaultFlagsOverride: [] + + extraFlags: + - web.enable-lifecycle + ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as + ## deleting time series. This is disabled by default. + # - web.enable-admin-api + ## + ## storage.tsdb.no-lockfile flag controls BD locking + # - storage.tsdb.no-lockfile + ## + ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) + # - storage.tsdb.wal-compression + + ## Path to a configuration file on prometheus server container FS + configPath: /etc/config/prometheus.yml + + ### The data directory used by prometheus to set --storage.tsdb.path + ### When empty server.persistentVolume.mountPath is used instead + storagePath: "" + + global: + ## How frequently to scrape targets by default + ## + scrape_interval: 1m + ## How long until a scrape request times out + ## + scrape_timeout: 10s + ## How frequently to evaluate rules + ## + evaluation_interval: 1m + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write + ## + remoteWrite: [] + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read + ## + remoteRead: [] + + ## Custom HTTP headers for Liveness/Readiness/Startup Probe + ## + ## Useful for providing HTTP Basic Auth to healthchecks + probeHeaders: [] + + ## Additional Prometheus server container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## Additional Prometheus server Volume mounts + ## + extraVolumeMounts: [] + + ## Additional Prometheus server Volumes + ## + extraVolumes: [] + + ## Additional Prometheus server hostPath mounts + ## + extraHostPathMounts: [] + # - name: certs-dir + # mountPath: /etc/kubernetes/certs + # subPath: "" + # hostPath: /etc/kubernetes/certs + # readOnly: true + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # subPath: "" + # configMap: certs-configmap + # readOnly: true + + ## Additional Prometheus server Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
+ extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: prom-secret-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/server-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + ## Prometheus server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## Prometheus server Ingress additional labels + ## + extraLabels: {} + + ## Prometheus server Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - prometheus.domain.com + # - domain.com/prometheus + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-server-tls + # hosts: + # - prometheus.domain.com + + ## Server Deployment Strategy type + # strategy: + # type: Recreate + + ## hostAliases allows adding entries to /etc/hosts inside the containers + hostAliases: [] + # - ip: "127.0.0.1" + # hostnames: + # - "example.com" + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for Prometheus server pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, Prometheus server will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## Prometheus server data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## Prometheus server data Persistent Volume annotations + ## + annotations: {} + + ## Prometheus server data Persistent Volume existing claim name + ## Requires server.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## Prometheus server data Persistent Volume mount root path + ## + mountPath: /data + + ## Prometheus server data Persistent Volume size + ## + size: 8Gi + + ## Prometheus server data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Prometheus server data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. + ## + # volumeBindingMode: "" + + ## Subdirectory of Prometheus server data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Persistent Volume Claim Selector + ## Useful if Persistent Volumes have been provisioned in advance + ## Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + # selector: + # matchLabels: + # release: "stable" + # matchExpressions: + # - { key: environment, operator: In, values: [ dev ] } + + emptyDir: + ## Prometheus server emptyDir volume size limit + ## + sizeLimit: "" + + ## Annotations to be added to Prometheus server pods + ## + podAnnotations: {} + # iam.amazonaws.com/role: prometheus + + ## Labels to be added to Prometheus server pods + ## + podLabels: {} + + ## Prometheus AlertManager configuration + ## + alertmanagers: [] + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. 
+ ## This allows to scale replicas to more than 1 pod + ## + enabled: false + + annotations: {} + labels: {} + podManagementPolicy: OrderedReady + + ## Alertmanager headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + servicePort: 80 + ## Enable gRPC port on service to allow auto discovery with thanos-querier + gRPC: + enabled: false + servicePort: 10901 + # nodePort: 10901 + + ## Prometheus server readiness and liveness probe initial delay and timeout + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## + tcpSocketProbeEnabled: false + probeScheme: HTTP + readinessProbeInitialDelay: 30 + readinessProbePeriodSeconds: 5 + readinessProbeTimeout: 4 + readinessProbeFailureThreshold: 3 + readinessProbeSuccessThreshold: 1 + livenessProbeInitialDelay: 30 + livenessProbePeriodSeconds: 15 + livenessProbeTimeout: 10 + livenessProbeFailureThreshold: 3 + livenessProbeSuccessThreshold: 1 + startupProbe: + enabled: false + periodSeconds: 5 + failureThreshold: 30 + timeoutSeconds: 10 + + ## Prometheus server resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi + + # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), + # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working + ## + hostNetwork: false + + # When hostNetwork is enabled, you probably want to set this to ClusterFirstWithHostNet + dnsPolicy: ClusterFirst + + # Use hostPort + # hostPort: 9090 + + ## Vertical Pod Autoscaler config + ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler + verticalAutoscaler: + ## If true a VPA object will be created for the controller (either StatefulSet or Deployemnt, based on above configs) + enabled: false + # updateMode: "Auto" + # containerPolicies: + # - containerName: 'prometheus-server' + + # Custom DNS configuration to be added to prometheus server pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + ## Security context to be added to server pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + service: + ## If false, no Service will be created for the Prometheus server + ## + enabled: true + + annotations: {} + labels: {} + clusterIP: "" + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + sessionAffinity: None + type: ClusterIP + + ## Enable gRPC port on service to allow auto discovery with thanos-querier + gRPC: + enabled: false + servicePort: 10901 + # nodePort: 10901 + + ## If using a statefulSet (statefulSet.enabled=true), configure the + ## service to connect to a specific replica to have a consistent view + ## of the data. 
+ statefulsetReplica: + enabled: false + replica: 0 + + ## Prometheus server pod termination grace period + ## + terminationGracePeriodSeconds: 300 + + ## Prometheus data retention period (default if not specified is 15 days) + ## + retention: "15d" + +pushgateway: + ## If false, pushgateway will not be installed + ## + enabled: true + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## pushgateway container name + ## + name: pushgateway + + ## pushgateway container image + ## + image: + repository: prom/pushgateway + tag: v1.4.2 + pullPolicy: IfNotPresent + + ## pushgateway priorityClassName + ## + priorityClassName: "" + + ## Additional pushgateway container arguments + ## + ## for example: persistence.file: /data/pushgateway.data + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ingress: + ## If true, pushgateway Ingress will be created + ## + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + ## pushgateway Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## pushgateway Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - pushgateway.domain.com + # - domain.com/pushgateway + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## pushgateway Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - pushgateway.domain.com + + ## Node tolerations for pushgateway scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for pushgateway pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to pushgateway pods + ## + podAnnotations: {} + + ## Labels to be added to pushgateway pods + ## + podLabels: {} + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## pushgateway resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + ## Vertical Pod Autoscaler config + ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler + verticalAutoscaler: + ## If true a VPA object will be created for the controller + enabled: false + # updateMode: "Auto" + # containerPolicies: + # - containerName: 'prometheus-pushgateway' + + # Custom DNS configuration to be added to push-gateway pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to push-gateway pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + + service: + annotations: + prometheus.io/probe: pushgateway + labels: {} + clusterIP: "" + + ## List of IP addresses at which the pushgateway service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9091 + type: ClusterIP + + ## pushgateway Deployment Strategy type + # strategy: + # type: Recreate + + persistentVolume: + ## If true, pushgateway will create/use a Persistent Volume Claim + ## + enabled: false + + ## pushgateway data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## pushgateway data 
Persistent Volume Claim annotations + ## + annotations: {} + + ## pushgateway data Persistent Volume existing claim name + ## Requires pushgateway.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## pushgateway data Persistent Volume mount root path + ## + mountPath: /data + + ## pushgateway data Persistent Volume size + ## + size: 2Gi + + ## pushgateway data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## pushgateway data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. + ## + # volumeBindingMode: "" + + ## Subdirectory of pushgateway data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + +## alertmanager ConfigMap entries +## +alertmanagerFiles: + alertmanager.yml: + global: {} + # slack_api_url: '' + + receivers: + - name: default-receiver + # slack_configs: + # - channel: '@you' + # send_resolved: true + + route: + group_wait: 10s + group_interval: 5m + receiver: default-receiver + repeat_interval: 3h + +## Prometheus server ConfigMap entries for rule files (allow prometheus labels interpolation) +ruleFiles: {} + +## Prometheus server ConfigMap entries +## +serverFiles: + + ## Alerts configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + alerting_rules.yml: {} + # groups: + # - name: Instances + # rules: + # - alert: InstanceDown + # expr: up == 0 + # for: 5m + # labels: + # severity: page + # annotations: + # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' + # summary: 'Instance {{ $labels.instance }} down' + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml + alerts: {} + + ## Records configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ + recording_rules.yml: {} + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml + rules: {} + + prometheus.yml: + rule_files: + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml + ## Below two files are DEPRECATED will be removed from this default values file + - /etc/config/rules + - /etc/config/alerts + + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + + # Scrape config for API servers. + # + # Kubernetes exposes API servers as endpoints to the default/kubernetes + # service so this uses `endpoints` role and uses relabelling to only keep + # the endpoints associated with the default/kubernetes service using the + # default named port `https`. This works for single API server deployments as + # well as HA API server deployments. 
+ - job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server which Kubernetes adds an endpoint to + # the default/kubernetes service. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + + - job_name: 'kubernetes-nodes' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics + + + - job_name: 'kubernetes-nodes-cadvisor' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + # This configuration will work only on kubelet 1.7.3+ + # As the scrape endpoints for cAdvisor have changed + # if you are using older version you need to change the replacement to + # replacement: /api/v1/nodes/$1:4194/proxy/metrics + # more info here https://github.com/coreos/prometheus-operator/issues/633 + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + + # Scrape config for service endpoints. + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape`: Only scrape services that have a value of + # `true`, except if `prometheus.io/scrape-slow` is set to `true` as well. + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + # * `prometheus.io/param_`: If the metrics endpoint uses parameters + # then you can set any parameter + - job_name: 'kubernetes-service-endpoints' + honor_labels: true + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: drop + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) 
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: service + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + # Scrape config for slow service endpoints; same as above, but with a larger + # timeout and a larger interval + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + # * `prometheus.io/param_`: If the metrics endpoint uses parameters + # then you can set any parameter + - job_name: 'kubernetes-service-endpoints-slow' + honor_labels: true + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: service + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + - job_name: 'prometheus-pushgateway' + honor_labels: true + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: pushgateway + + # Example scrape config for probing services via the Blackbox Exporter. 
+ # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/probe`: Only probe services that have a value of `true` + - job_name: 'kubernetes-services' + honor_labels: true + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: service + + # Example scrape config for pods + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`, + # except if `prometheus.io/scrape-slow` is set to `true` as well. + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + - job_name: 'kubernetes-pods' + honor_labels: true + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: drop + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) + target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + + # Example Scrape config for pods which should be scraped slower. An useful example + # would be stackriver-exporter which queries an API on every scrape of the pod + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. 
+ - job_name: 'kubernetes-pods-slow' + honor_labels: true + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) + target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + +# adds additional scrape configs to prometheus.yml +# must be a string so you have to add a | after extraScrapeConfigs: +# example adds prometheus-blackbox-exporter scrape config +extraScrapeConfigs: + # - job_name: 'prometheus-blackbox-exporter' + # metrics_path: /probe + # params: + # module: [http_2xx] + # static_configs: + # - targets: + # - https://example.com + # relabel_configs: + # - source_labels: [__address__] + # target_label: __param_target + # - source_labels: [__param_target] + # target_label: instance + # - target_label: __address__ + # replacement: prometheus-blackbox-exporter:9115 + +# Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager +# useful in H/A prometheus with different external labels but the same alerts +alertRelabelConfigs: + # alert_relabel_configs: + # - source_labels: [dc] + # regex: (.+)\d+ + # target_label: dc + +networkPolicy: + ## Enable creation of NetworkPolicy resources. + ## + enabled: false + +# Force namespace of namespaced resources +forceNamespace: null diff --git a/deploy/prod/charts/prometheus/.helmignore b/deploy/prod/charts/prometheus/.helmignore new file mode 100644 index 0000000..825c007 --- /dev/null +++ b/deploy/prod/charts/prometheus/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj + +OWNERS diff --git a/deploy/prod/charts/prometheus/Chart.lock b/deploy/prod/charts/prometheus/Chart.lock new file mode 100644 index 0000000..5de4799 --- /dev/null +++ b/deploy/prod/charts/prometheus/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 4.7.0 +digest: sha256:1ae96f01358013a1ef2df0a83c215f1c8390f10fcb64f6d014d1cd00eb7d7eeb +generated: "2022-04-20T09:25:53.255019562Z" diff --git a/deploy/prod/charts/prometheus/Chart.yaml b/deploy/prod/charts/prometheus/Chart.yaml new file mode 100644 index 0000000..315edf4 --- /dev/null +++ b/deploy/prod/charts/prometheus/Chart.yaml @@ -0,0 +1,28 @@ +apiVersion: v2 +appVersion: 2.34.0 +dependencies: +- condition: kubeStateMetrics.enabled + name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 4.7.* +description: Prometheus is a monitoring system and time series database. +home: https://prometheus.io/ +icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png +maintainers: +- email: gianrubio@gmail.com + name: gianrubio +- email: zanhsieh@gmail.com + name: zanhsieh +- email: miroslav.hadzhiev@gmail.com + name: Xtigyro +- email: naseem@transit.app + name: naseemkullah +name: prometheus +sources: +- https://github.com/prometheus/alertmanager +- https://github.com/prometheus/prometheus +- https://github.com/prometheus/pushgateway +- https://github.com/prometheus/node_exporter +- https://github.com/kubernetes/kube-state-metrics +type: application +version: 15.8.5 diff --git a/deploy/prod/charts/prometheus/README.md b/deploy/prod/charts/prometheus/README.md new file mode 100644 index 0000000..d8a1e9a --- /dev/null +++ b/deploy/prod/charts/prometheus/README.md @@ -0,0 +1,226 @@ +# Prometheus + +[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true. + +This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.16+ +- Helm 3+ + +## Get Repo Info + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +```console +helm install [RELEASE_NAME] prometheus-community/prometheus +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Dependencies + +By default this chart installs additional, dependent charts: + +- [kube-state-metrics](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics) + +To disable the dependency during installation, set `kubeStateMetrics.enabled` to `false`. 
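+
+For example, one way to disable it at install time (a sketch; substitute your own release name) is with `--set`:
+
+```console
+helm install [RELEASE_NAME] prometheus-community/prometheus --set kubeStateMetrics.enabled=false
+```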
+ +_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### To 15.0 + +Version 15.0.0 changes the relabeling config, aligning it with the [Prometheus community conventions](https://github.com/prometheus/prometheus/pull/9832). If you've made manual changes to the relabeling config, you have to adapt your changes. + +Before you update please execute the following command, to be able to update kube-state-metrics: + +```bash +kubectl delete deployments.apps -l app.kubernetes.io/instance=prometheus,app.kubernetes.io/name=kube-state-metrics --cascade=orphan +``` + +### To 9.0 + +Version 9.0 adds a new option to enable or disable the Prometheus Server. This supports the use case of running a Prometheus server in one k8s cluster and scraping exporters in another cluster while using the same chart for each deployment. To install the server `server.enabled` must be set to `true`. + +### To 5.0 + +As of version 5.0, this chart uses Prometheus 2.x. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work. See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data. + +Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out the migration guide [here](https://prometheus.io/docs/prometheus/2.0/migration/). + +Users of this chart will need to update their alerting rules to the new format before they can upgrade. + +### Example Migration + +Assuming you have an existing release of the prometheus chart, named `prometheus-old`. In order to update to prometheus 2.x while keeping your old data do the following: + +1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below: + + ```yaml + alertmanager: + enabled: false + alertmanagerFiles: + alertmanager.yml: "" + kubeStateMetrics: + enabled: false + nodeExporter: + enabled: false + pushgateway: + enabled: false + server: + extraArgs: + storage.local.retention: 720h + serverFiles: + alerts: "" + prometheus.yml: "" + rules: "" + ``` + +1. Deploy a new release of the chart with version 5.0+ using prometheus 2.x. In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target. + + ```yaml + prometheus.yml: + ... + remote_read: + - url: http://prometheus-old/api/v1/read + ... + ``` + + Old data will be available when you query the new prometheus instance. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). 
To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run this configuration command:
+
+```console
+helm show values prometheus-community/prometheus
+```
+
+You may similarly use the above configuration command on each chart [dependency](#dependencies) to see its configuration.
+
+### Scraping Pod Metrics via Annotations
+
+This chart uses a default configuration that causes prometheus to scrape a variety of kubernetes resource types, provided they have the correct annotations. In this section we describe how to configure pods to be scraped; for information on how other resource types can be scraped you can do a `helm template` to get the kubernetes resource definitions, and then reference the prometheus configuration in the ConfigMap against the prometheus documentation for [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) and [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config).
+
+In order to get prometheus to scrape pods, you must add annotations to the pods as below:
+
+```yaml
+metadata:
+  annotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/path: /metrics
+    prometheus.io/port: "8080"
+```
+
+You should adjust `prometheus.io/path` based on the URL that your pod serves metrics from. `prometheus.io/port` should be set to the port that your pod serves metrics from. Note that the values for `prometheus.io/scrape` and `prometheus.io/port` must be enclosed in double quotes.
+
+### Sharing Alerts Between Services
+
+Note that when [installing](#install-chart) or [upgrading](#upgrading-chart) you may use multiple values override files. This is particularly useful when you have alerts belonging to multiple services in the cluster. For example,
+
+```yaml
+# values.yaml
+# ...
+
+# service1-alert.yaml
+serverFiles:
+  alerts:
+    service1:
+      - alert: anAlert
+      # ...
+
+# service2-alert.yaml
+serverFiles:
+  alerts:
+    service2:
+      - alert: anAlert
+      # ...
+```
+
+```console
+helm install [RELEASE_NAME] prometheus-community/prometheus -f values.yaml -f service1-alert.yaml -f service2-alert.yaml
+```
+
+### RBAC Configuration
+
+Role and RoleBinding resources will be created automatically for the `server` service.
+
+To manually set up RBAC, you need to set the parameter `rbac.create=false` and specify the service account to be used for each service by setting the parameters: `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account.
+
+> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own.
+
+### ConfigMap Files
+
+AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod.
+
+Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod.
+
+### Ingress TLS
+
+If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism.
+
+To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect.
Then create a TLS secret in the namespace: + +```console +kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file: + +```yaml +server: + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: true + + ## Prometheus server Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - prometheus.domain.com + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: prometheus-server-tls + hosts: + - prometheus.domain.com +``` + +### NetworkPolicy + +Enabling Network Policy for Prometheus will secure connections to Alert Manager and Kube State Metrics by only accepting connections from Prometheus Server. All inbound connections to Prometheus Server are still allowed. + +To enable network policy for Prometheus, install a networking plugin that implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true. + +If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need to manually create a networkpolicy which allows it. diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/.helmignore b/deploy/prod/charts/prometheus/charts/kube-state-metrics/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/Chart.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/Chart.yaml new file mode 100644 index 0000000..2ee1909 --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +appVersion: 2.4.1 +description: Install kube-state-metrics to generate and expose cluster-level metrics +home: https://github.com/kubernetes/kube-state-metrics/ +keywords: +- metric +- monitoring +- prometheus +- kubernetes +maintainers: +- email: tariq.ibrahim@mulesoft.com + name: tariq1890 +- email: manuel@rueg.eu + name: mrueg +- email: davidcalvertfr@gmail.com + name: dotdc +name: kube-state-metrics +sources: +- https://github.com/kubernetes/kube-state-metrics/ +type: application +version: 4.7.0 diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/README.md b/deploy/prod/charts/prometheus/charts/kube-state-metrics/README.md new file mode 100644 index 0000000..7c2e169 --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/README.md @@ -0,0 +1,68 @@ +# kube-state-metrics Helm Chart + +Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state-metrics). 
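+
+In this repository the chart is vendored as a dependency of the parent prometheus chart, so its options are normally set under the `kube-state-metrics:` key of the parent chart's values file (standard Helm sub-chart behaviour). A minimal sketch, with an illustrative override:
+
+```yaml
+# parent prometheus chart values.yaml (sketch)
+kubeStateMetrics:
+  enabled: true          # condition controlling whether this sub-chart is installed
+kube-state-metrics:
+  fullnameOverride: ""   # any value from this chart's values.yaml may be placed here
+```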
+
+## Get Repo Info
+
+```console
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm repo update
+```
+
+_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
+
+## Install Chart
+
+```console
+helm install [RELEASE_NAME] prometheus-community/kube-state-metrics [flags]
+```
+
+_See [configuration](#configuration) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+## Uninstall Chart
+
+```console
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+## Upgrading Chart
+
+```console
+helm upgrade [RELEASE_NAME] prometheus-community/kube-state-metrics [flags]
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+### Migrating from stable/kube-state-metrics and kubernetes/kube-state-metrics
+
+You can upgrade in-place:
+
+1. [get repo info](#get-repo-info)
+1. [upgrade](#upgrading-chart) your existing release name using the new chart repo
+
+
+## Upgrading to v3.0.0
+
+v3.0.0 includes kube-state-metrics v2.0; see the [changelog](https://github.com/kubernetes/kube-state-metrics/blob/release-2.0/CHANGELOG.md) for major changes on the application side.
+
+The upgraded chart now has the following changes:
+* Dropped support for helm v2 (helm v3 or later is required)
+* The `collectors` key was renamed to `resources`
+* The `namespace` key was renamed to `namespaces`
+
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments:
+
+```console
+helm show values prometheus-community/kube-state-metrics
+```
+
+You may also run `helm show values` on this chart's [dependencies](#dependencies) for additional options.
diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/NOTES.txt b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/NOTES.txt
new file mode 100644
index 0000000..5a646e0
--- /dev/null
+++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/NOTES.txt
@@ -0,0 +1,10 @@
+kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects.
+The exposed metrics can be found here:
+https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics
+
+The metrics are exported on the HTTP endpoint /metrics on the listening port.
+In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics
+
+They are served either as plaintext or protobuf depending on the Accept header.
+They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint.
+
diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl
new file mode 100644
index 0000000..976b273
--- /dev/null
+++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl
@@ -0,0 +1,82 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
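+When .Values.nameOverride is unset this falls back to .Chart.Name; the result is truncated to 63 characters (with any trailing "-" trimmed) to satisfy the Kubernetes name length limit.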
+*/}} +{{- define "kube-state-metrics.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kube-state-metrics.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kube-state-metrics.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "kube-state-metrics.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "kube-state-metrics.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kube-state-metrics.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate basic labels +*/}} +{{- define "kube-state-metrics.labels" }} +helm.sh/chart: {{ template "kube-state-metrics.chart" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/component: metrics +app.kubernetes.io/part-of: {{ template "kube-state-metrics.name" . }} +{{- include "kube-state-metrics.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels }} +{{- end }} +{{- if .Values.releaseLabel }} +release: {{ .Release.Name }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kube-state-metrics.selectorLabels" }} +app.kubernetes.io/name: {{ include "kube-state-metrics.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..cf9f628 --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.rbac.create .Values.rbac.useClusterRole -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: {{ template "kube-state-metrics.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} +{{- else }} + name: {{ template "kube-state-metrics.fullname" . }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" . }} + namespace: {{ template "kube-state-metrics.namespace" . 
}} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml new file mode 100644 index 0000000..60f5d59 --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml @@ -0,0 +1,151 @@ +apiVersion: apps/v1 +{{- if .Values.autosharding.enabled }} +kind: StatefulSet +{{- else }} +kind: Deployment +{{- end }} +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +spec: + selector: + matchLabels: + {{- include "kube-state-metrics.selectorLabels" . | indent 6 }} + replicas: {{ .Values.replicas }} + {{- if .Values.autosharding.enabled }} + serviceName: {{ template "kube-state-metrics.fullname" . }} + volumeClaimTemplates: [] + {{- end }} + template: + metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 8 }} + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + hostNetwork: {{ .Values.hostNetwork }} + serviceAccountName: {{ template "kube-state-metrics.serviceAccountName" . }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + {{- if .Values.autosharding.enabled }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- end }} + args: + {{- if .Values.extraArgs }} + {{- range .Values.extraArgs }} + - {{ . 
}} + {{- end }} + {{- end }} + {{- if .Values.service.port }} + - --port={{ .Values.service.port | default 8080}} + {{- end }} + {{- if .Values.collectors }} + - --resources={{ .Values.collectors | join "," }} + {{- end }} + {{- if .Values.metricLabelsAllowlist }} + - --metric-labels-allowlist={{ .Values.metricLabelsAllowlist | join "," }} + {{- end }} + {{- if .Values.metricAnnotationsAllowList }} + - --metric-annotations-allowlist={{ .Values.metricAnnotationsAllowList | join "," }} + {{- end }} + {{- if .Values.metricAllowlist }} + - --metric-allowlist={{ .Values.metricAllowlist | join "," }} + {{- end }} + {{- if .Values.metricDenylist }} + - --metric-denylist={{ .Values.metricDenylist | join "," }} + {{- end }} + {{- if .Values.namespaces }} + - --namespaces={{ tpl (.Values.namespaces | join ",") $ }} + {{- end }} + {{- if .Values.namespacesDenylist }} + - --namespaces-denylist={{ tpl (.Values.namespacesDenylist | join ",") $ }} + {{- end }} + {{- if .Values.autosharding.enabled }} + - --pod=$(POD_NAME) + - --pod-namespace=$(POD_NAMESPACE) + {{- end }} + {{- if .Values.kubeconfig.enabled }} + - --kubeconfig=/opt/k8s/.kube/config + {{- end }} + {{- if .Values.selfMonitor.telemetryHost }} + - --telemetry-host={{ .Values.selfMonitor.telemetryHost }} + {{- end }} + - --telemetry-port={{ .Values.selfMonitor.telemetryPort | default 8081 }} + {{- if .Values.kubeconfig.enabled }} + volumeMounts: + - name: kubeconfig + mountPath: /opt/k8s/.kube/ + readOnly: true + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + ports: + - containerPort: {{ .Values.service.port | default 8080}} + name: "http" + {{- if .Values.selfMonitor.enabled }} + - containerPort: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + name: "metrics" + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.service.port | default 8080}} + initialDelaySeconds: 5 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: / + port: {{ .Values.service.port | default 8080}} + initialDelaySeconds: 5 + timeoutSeconds: 5 + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 10 }} +{{- end }} +{{- if .Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 10 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.kubeconfig.enabled}} + volumes: + - name: kubeconfig + secret: + secretName: {{ template "kube-state-metrics.fullname" . }}-kubeconfig + {{- end }} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml new file mode 100644 index 0000000..6af0084 --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml @@ -0,0 +1,12 @@ +{{- if .Values.kubeconfig.enabled -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kube-state-metrics.fullname" . }}-kubeconfig + namespace: {{ template "kube-state-metrics.namespace" . 
}} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +type: Opaque +data: + config: '{{ .Values.kubeconfig.secret }}' +{{- end -}} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/pdb.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/pdb.yaml new file mode 100644 index 0000000..cbcf3a3 --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..3299056 --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml @@ -0,0 +1,39 @@ +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +{{- if .Values.podSecurityPolicy.annotations }} + annotations: +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + volumes: + - 'secret' +{{- if .Values.podSecurityPolicy.additionalVolumes }} +{{ toYaml .Values.podSecurityPolicy.additionalVolumes | indent 4 }} +{{- end }} + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml new file mode 100644 index 0000000..69047d4 --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: psp-{{ template "kube-state-metrics.fullname" . }} +rules: +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} +- apiGroups: ['policy'] +{{- else }} +- apiGroups: ['extensions'] +{{- end }} + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "kube-state-metrics.fullname" . 
}} +{{- end }} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml new file mode 100644 index 0000000..03c56d5 --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: psp-{{ template "kube-state-metrics.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/role.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/role.yaml new file mode 100644 index 0000000..e514e3c --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/role.yaml @@ -0,0 +1,187 @@ +{{- if and (eq .Values.rbac.create true) (not .Values.rbac.useExistingRole) -}} +{{- range (ternary (split "," .Values.namespaces) (list "") (eq $.Values.rbac.useClusterRole false)) }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +{{- if eq $.Values.rbac.useClusterRole false }} +kind: Role +{{- else }} +kind: ClusterRole +{{- end }} +metadata: + labels: + {{- include "kube-state-metrics.labels" $ | indent 4 }} + name: {{ template "kube-state-metrics.fullname" $ }} +{{- if eq $.Values.rbac.useClusterRole false }} + namespace: {{ . 
}} +{{- end }} +rules: +{{ if has "certificatesigningrequests" $.Values.collectors }} +- apiGroups: ["certificates.k8s.io"] + resources: + - certificatesigningrequests + verbs: ["list", "watch"] +{{ end -}} +{{ if has "configmaps" $.Values.collectors }} +- apiGroups: [""] + resources: + - configmaps + verbs: ["list", "watch"] +{{ end -}} +{{ if has "cronjobs" $.Values.collectors }} +- apiGroups: ["batch"] + resources: + - cronjobs + verbs: ["list", "watch"] +{{ end -}} +{{ if has "daemonsets" $.Values.collectors }} +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "deployments" $.Values.collectors }} +- apiGroups: ["extensions", "apps"] + resources: + - deployments + verbs: ["list", "watch"] +{{ end -}} +{{ if has "endpoints" $.Values.collectors }} +- apiGroups: [""] + resources: + - endpoints + verbs: ["list", "watch"] +{{ end -}} +{{ if has "horizontalpodautoscalers" $.Values.collectors }} +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: ["list", "watch"] +{{ end -}} +{{ if has "ingresses" $.Values.collectors }} +- apiGroups: ["extensions", "networking.k8s.io"] + resources: + - ingresses + verbs: ["list", "watch"] +{{ end -}} +{{ if has "jobs" $.Values.collectors }} +- apiGroups: ["batch"] + resources: + - jobs + verbs: ["list", "watch"] +{{ end -}} +{{ if has "limitranges" $.Values.collectors }} +- apiGroups: [""] + resources: + - limitranges + verbs: ["list", "watch"] +{{ end -}} +{{ if has "mutatingwebhookconfigurations" $.Values.collectors }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - mutatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if has "namespaces" $.Values.collectors }} +- apiGroups: [""] + resources: + - namespaces + verbs: ["list", "watch"] +{{ end -}} +{{ if has "networkpolicies" $.Values.collectors }} +- apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: ["list", "watch"] +{{ end -}} +{{ if has "nodes" $.Values.collectors }} +- apiGroups: [""] + resources: + - nodes + verbs: ["list", "watch"] +{{ end -}} +{{ if has "persistentvolumeclaims" $.Values.collectors }} +- apiGroups: [""] + resources: + - persistentvolumeclaims + verbs: ["list", "watch"] +{{ end -}} +{{ if has "persistentvolumes" $.Values.collectors }} +- apiGroups: [""] + resources: + - persistentvolumes + verbs: ["list", "watch"] +{{ end -}} +{{ if has "poddisruptionbudgets" $.Values.collectors }} +- apiGroups: ["policy"] + resources: + - poddisruptionbudgets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "pods" $.Values.collectors }} +- apiGroups: [""] + resources: + - pods + verbs: ["list", "watch"] +{{ end -}} +{{ if has "replicasets" $.Values.collectors }} +- apiGroups: ["extensions", "apps"] + resources: + - replicasets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "replicationcontrollers" $.Values.collectors }} +- apiGroups: [""] + resources: + - replicationcontrollers + verbs: ["list", "watch"] +{{ end -}} +{{ if has "resourcequotas" $.Values.collectors }} +- apiGroups: [""] + resources: + - resourcequotas + verbs: ["list", "watch"] +{{ end -}} +{{ if has "secrets" $.Values.collectors }} +- apiGroups: [""] + resources: + - secrets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "services" $.Values.collectors }} +- apiGroups: [""] + resources: + - services + verbs: ["list", "watch"] +{{ end -}} +{{ if has "statefulsets" $.Values.collectors }} +- apiGroups: ["apps"] + resources: + - statefulsets + verbs: ["list", "watch"] +{{ end -}} +{{ 
if has "storageclasses" $.Values.collectors }} +- apiGroups: ["storage.k8s.io"] + resources: + - storageclasses + verbs: ["list", "watch"] +{{ end -}} +{{ if has "validatingwebhookconfigurations" $.Values.collectors }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - validatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if has "volumeattachments" $.Values.collectors }} +- apiGroups: ["storage.k8s.io"] + resources: + - volumeattachments + verbs: ["list", "watch"] +{{ end -}} +{{ if has "verticalpodautoscalers" $.Values.collectors }} +- apiGroups: ["autoscaling.k8s.io"] + resources: + - verticalpodautoscalers + verbs: ["list", "watch"] +{{ end -}} +{{- end -}} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml new file mode 100644 index 0000000..135094f --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}} +{{- range (split "," $.Values.namespaces) }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "kube-state-metrics.labels" $ | indent 4 }} + name: {{ template "kube-state-metrics.fullname" $ }} + namespace: {{ . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role +{{- if (not $.Values.rbac.useExistingRole) }} + name: {{ template "kube-state-metrics.fullname" $ }} +{{- else }} + name: {{ $.Values.rbac.useExistingRole }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" $ }} + namespace: {{ template "kube-state-metrics.namespace" $ }} +{{- end -}} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/service.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/service.yaml new file mode 100644 index 0000000..5a2d8ea --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/service.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + annotations: + {{- if .Values.prometheusScrape }} + prometheus.io/scrape: '{{ .Values.prometheusScrape }}' + {{- end }} + {{- if .Values.service.annotations }} + {{- toYaml .Values.service.annotations | nindent 4 }} + {{- end }} +spec: + type: "{{ .Values.service.type }}" + ports: + - name: "http" + protocol: TCP + port: {{ .Values.service.port | default 8080}} + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + targetPort: {{ .Values.service.port | default 8080}} + {{ if .Values.selfMonitor.enabled }} + - name: "metrics" + protocol: TCP + port: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + targetPort: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + {{ end }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.service.clusterIP }} + clusterIP: "{{ .Values.service.clusterIP }}" +{{- end }} + selector: + {{- include "kube-state-metrics.selectorLabels" . 
| indent 4 }} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml new file mode 100644 index 0000000..e1229eb --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: {{ template "kube-state-metrics.serviceAccountName" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} +{{- end }} +imagePullSecrets: +{{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml new file mode 100644 index 0000000..93a5870 --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml @@ -0,0 +1,66 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + {{- with .Values.prometheus.monitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ default "app.kubernetes.io/name" .Values.prometheus.monitor.jobLabel }} + selector: + matchLabels: + {{- if .Values.prometheus.monitor.selectorOverride -}} + {{ toYaml .Values.prometheus.monitor.selectorOverride | nindent 6 }} + {{ else }} + {{- include "kube-state-metrics.selectorLabels" . 
| indent 6 }} + {{- end }} + endpoints: + - port: http + {{- if .Values.prometheus.monitor.interval }} + interval: {{ .Values.prometheus.monitor.interval }} + {{- end }} + {{- if .Values.prometheus.monitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.prometheus.monitor.scrapeTimeout }} + {{- end }} + {{- if .Values.prometheus.monitor.proxyUrl }} + proxyUrl: {{ .Values.prometheus.monitor.proxyUrl}} + {{- end }} + {{- if .Values.prometheus.monitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.prometheus.monitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.prometheus.monitor.metricRelabelings | nindent 8 }} + {{- end }} + {{- if .Values.prometheus.monitor.relabelings }} + relabelings: + {{- toYaml .Values.prometheus.monitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.selfMonitor.enabled }} + - port: metrics + {{- if .Values.prometheus.monitor.interval }} + interval: {{ .Values.prometheus.monitor.interval }} + {{- end }} + {{- if .Values.prometheus.monitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.prometheus.monitor.scrapeTimeout }} + {{- end }} + {{- if .Values.prometheus.monitor.proxyUrl }} + proxyUrl: {{ .Values.prometheus.monitor.proxyUrl}} + {{- end }} + {{- if .Values.prometheus.monitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.prometheus.monitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.prometheus.monitor.metricRelabelings | nindent 8 }} + {{- end }} + {{- if .Values.prometheus.monitor.relabelings }} + relabelings: + {{- toYaml .Values.prometheus.monitor.relabelings | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml new file mode 100644 index 0000000..489de14 --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - apps + resourceNames: + - {{ template "kube-state-metrics.fullname" . }} + resources: + - statefulsets + verbs: + - get + - list + - watch +{{- end }} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml new file mode 100644 index 0000000..73b37a4 --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" . 
}} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/charts/kube-state-metrics/values.yaml b/deploy/prod/charts/prometheus/charts/kube-state-metrics/values.yaml new file mode 100644 index 0000000..89e0da7 --- /dev/null +++ b/deploy/prod/charts/prometheus/charts/kube-state-metrics/values.yaml @@ -0,0 +1,229 @@ +# Default values for kube-state-metrics. +prometheusScrape: true +image: + repository: k8s.gcr.io/kube-state-metrics/kube-state-metrics + tag: v2.4.1 + pullPolicy: IfNotPresent + +imagePullSecrets: [] +# - name: "image-pull-secret" + +# If set to true, this will deploy kube-state-metrics as a StatefulSet and the data +# will be automatically sharded across <.Values.replicas> pods using the built-in +# autodiscovery feature: https://github.com/kubernetes/kube-state-metrics#automated-sharding +# This is an experimental feature and there are no stability guarantees. +autosharding: + enabled: false + +replicas: 1 + +# List of additional cli arguments to configure kube-state-metrics +# for example: --enable-gzip-encoding, --log-file, etc. +# all the possible args can be found here: https://github.com/kubernetes/kube-state-metrics/blob/master/docs/cli-arguments.md +extraArgs: [] + +service: + port: 8080 + # Default to clusterIP for backward compatibility + type: ClusterIP + nodePort: 0 + loadBalancerIP: "" + clusterIP: "" + annotations: {} + +## Additional labels to add to all resources +customLabels: {} + # app: kube-state-metrics + +## set to true to add the release label so scraping of the servicemonitor with kube-prometheus-stack works out of the box +releaseLabel: false + +hostNetwork: false + +rbac: + # If true, create & use RBAC resources + create: true + + # Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to it, rolename set here. + # useExistingRole: your-existing-role + + # If set to false - Run without Cluteradmin privs needed - ONLY works if namespace is also set (if useExistingRole is set this name is used as ClusterRole or Role to bind to) + useClusterRole: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created, require rbac true + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + # Reference to one or more secrets to be used when pulling images + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + imagePullSecrets: [] + # ServiceAccount annotations. 
+ # Use case: AWS EKS IAM roles for service accounts + # ref: https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html + annotations: {} + +prometheus: + monitor: + enabled: false + additionalLabels: {} + namespace: "" + jobLabel: "" + interval: "" + scrapeTimeout: "" + proxyUrl: "" + selectorOverride: {} + honorLabels: false + metricRelabelings: [] + relabelings: [] + +## Specify if a Pod Security Policy for kube-state-metrics must be created +## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + enabled: false + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + additionalVolumes: [] + +securityContext: + enabled: true + runAsGroup: 65534 + runAsUser: 65534 + fsGroup: 65534 + +## Specify security settings for a Container +## Allows overrides and additional options compared to (Pod) securityContext +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +containerSecurityContext: {} + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +## Affinity settings for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +affinity: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +# Annotations to be added to the pod +podAnnotations: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: "" + +# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} + +# Comma-separated list of metrics to be exposed. +# This list comprises of exact metric names and/or regex patterns. +# The allowlist and denylist are mutually exclusive. +metricAllowlist: [] + +# Comma-separated list of metrics not to be enabled. +# This list comprises of exact metric names and/or regex patterns. +# The allowlist and denylist are mutually exclusive. +metricDenylist: [] + +# Comma-separated list of additional Kubernetes label keys that will be used in the resource's +# labels metric. By default the metric contains only name and namespace labels. +# To include additional labels, provide a list of resource names in their plural form and Kubernetes +# label keys you would like to allow for them (Example: '=namespaces=[k8s-label-1,k8s-label-n,...],pods=[app],...)'. +# A single '*' can be provided per resource instead to allow any labels, but that has +# severe performance implications (Example: '=pods=[*]'). +metricLabelsAllowlist: [] + # - namespaces=[k8s-label-1,k8s-label-n] + +# Comma-separated list of Kubernetes annotations keys that will be used in the resource' +# labels metric. By default the metric contains only name and namespace labels. +# To include additional annotations provide a list of resource names in their plural form and Kubernetes +# annotation keys you would like to allow for them (Example: '=namespaces=[kubernetes.io/team,...],pods=[kubernetes.io/team],...)'. 
+# A single '*' can be provided per resource instead to allow any annotations, but that has +# severe performance implications (Example: '=pods=[*]'). +metricAnnotationsAllowList: [] + # - pods=[k8s-annotation-1,k8s-annotation-n] + +# Available collectors for kube-state-metrics. +# By default, all available resources are enabled, comment out to disable. +collectors: + - certificatesigningrequests + - configmaps + - cronjobs + - daemonsets + - deployments + - endpoints + - horizontalpodautoscalers + - ingresses + - jobs + - limitranges + - mutatingwebhookconfigurations + - namespaces + - networkpolicies + - nodes + - persistentvolumeclaims + - persistentvolumes + - poddisruptionbudgets + - pods + - replicasets + - replicationcontrollers + - resourcequotas + - secrets + - services + - statefulsets + - storageclasses + - validatingwebhookconfigurations + - volumeattachments + # - verticalpodautoscalers # not a default resource, see also: https://github.com/kubernetes/kube-state-metrics#enabling-verticalpodautoscalers + +# Enabling kubeconfig will pass the --kubeconfig argument to the container +kubeconfig: + enabled: false + # base64 encoded kube-config file + secret: + +# Comma-separated list of namespaces to be enabled for collecting resources. By default all namespaces are collected. +namespaces: "" + +# Comma-separated list of namespaces not to be enabled. If namespaces and namespaces-denylist are both set, +# only namespaces that are excluded in namespaces-denylist will be used. +namespacesDenylist: "" + +## Override the deployment namespace +## +namespaceOverride: "" + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + +## Provide a k8s version to define apiGroups for podSecurityPolicy Cluster Role. +## For example: kubeTargetVersionOverride: 1.14.9 +## +kubeTargetVersionOverride: "" + +# Enable self metrics configuration for service and Service Monitor +# Default values for telemetry configuration can be overridden +selfMonitor: + enabled: false + # telemetryHost: 0.0.0.0 + # telemetryPort: 8081 diff --git a/deploy/prod/charts/prometheus/templates/NOTES.txt b/deploy/prod/charts/prometheus/templates/NOTES.txt new file mode 100644 index 0000000..0e8868f --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/NOTES.txt @@ -0,0 +1,112 @@ +{{- if .Values.server.enabled -}} +The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.server.ingress.enabled -}} +From outside the cluster, the server URL(s) are: +{{- range .Values.server.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the Prometheus server URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.server.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.server.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }} +{{- else if contains "ClusterIP" .Values.server.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.server.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9090 +{{- end }} +{{- end }} + +{{- if .Values.server.persistentVolume.enabled }} +{{- else }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Server pod is terminated. ##### +################################################################################# +{{- end }} +{{- end }} + +{{ if .Values.alertmanager.enabled }} +The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.alertmanager.ingress.enabled -}} +From outside the cluster, the alertmanager URL(s) are: +{{- range .Values.alertmanager.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the Alertmanager URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.alertmanager.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }} +{{- else if contains "ClusterIP" .Values.alertmanager.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.alertmanager.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093 +{{- end }} +{{- end }} + +{{- if .Values.alertmanager.persistentVolume.enabled }} +{{- else }} +################################################################################# +###### WARNING: Persistence is disabled!!! 
You will lose your data when ##### +###### the AlertManager pod is terminated. ##### +################################################################################# +{{- end }} +{{- end }} + +{{- if .Values.nodeExporter.podSecurityPolicy.enabled }} +{{- else }} +################################################################################# +###### WARNING: Pod Security Policy has been moved to a global property. ##### +###### use .Values.podSecurityPolicy.enabled with pod-based ##### +###### annotations ##### +###### (e.g. .Values.nodeExporter.podSecurityPolicy.annotations) ##### +################################################################################# +{{- end }} + +{{ if .Values.pushgateway.enabled }} +The Prometheus PushGateway can be accessed via port {{ .Values.pushgateway.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.pushgateway.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.pushgateway.ingress.enabled -}} +From outside the cluster, the pushgateway URL(s) are: +{{- range .Values.pushgateway.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the PushGateway URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.pushgateway.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.pushgateway.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.pushgateway.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.pushgateway.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.pushgateway.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.pushgateway.service.servicePort }} +{{- else if contains "ClusterIP" .Values.pushgateway.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.pushgateway.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091 +{{- end }} +{{- end }} +{{- end }} + +For more information on running Prometheus, visit: +https://prometheus.io/ diff --git a/deploy/prod/charts/prometheus/templates/_helpers.tpl b/deploy/prod/charts/prometheus/templates/_helpers.tpl new file mode 100644 index 0000000..065065c --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/_helpers.tpl @@ -0,0 +1,282 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "prometheus.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "prometheus.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create unified labels for prometheus components +*/}} +{{- define "prometheus.common.matchLabels" -}} +app: {{ template "prometheus.name" . 
}} +release: {{ .Release.Name }} +{{- end -}} + +{{- define "prometheus.common.metaLabels" -}} +chart: {{ template "prometheus.chart" . }} +heritage: {{ .Release.Service }} +{{- end -}} + +{{- define "prometheus.alertmanager.labels" -}} +{{ include "prometheus.alertmanager.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.alertmanager.matchLabels" -}} +component: {{ .Values.alertmanager.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.nodeExporter.labels" -}} +{{ include "prometheus.nodeExporter.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.nodeExporter.matchLabels" -}} +component: {{ .Values.nodeExporter.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.pushgateway.labels" -}} +{{ include "prometheus.pushgateway.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.pushgateway.matchLabels" -}} +component: {{ .Values.pushgateway.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.server.labels" -}} +{{ include "prometheus.server.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.server.matchLabels" -}} +component: {{ .Values.server.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified alertmanager name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} + +{{- define "prometheus.alertmanager.fullname" -}} +{{- if .Values.alertmanager.fullnameOverride -}} +{{- .Values.alertmanager.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified node-exporter name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "prometheus.nodeExporter.fullname" -}} +{{- if .Values.nodeExporter.fullnameOverride -}} +{{- .Values.nodeExporter.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified Prometheus server name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.server.fullname" -}} +{{- if .Values.server.fullnameOverride -}} +{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified pushgateway name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.pushgateway.fullname" -}} +{{- if .Values.pushgateway.fullnameOverride -}} +{{- .Values.pushgateway.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Get KubeVersion removing pre-release information. +*/}} +{{- define "prometheus.kubeVersion" -}} + {{- default .Capabilities.KubeVersion.Version (regexFind "v[0-9]+\\.[0-9]+\\.[0-9]+" .Capabilities.KubeVersion.Version) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "prometheus.deployment.apiVersion" -}} +{{- print "apps/v1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "prometheus.daemonset.apiVersion" -}} +{{- print "apps/v1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "prometheus.networkPolicy.apiVersion" -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for podsecuritypolicy. +*/}} +{{- define "prometheus.podSecurityPolicy.apiVersion" -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "rbac.apiVersion" -}} +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for ingress. 
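+Prefers networking.k8s.io/v1 when that API is available and the cluster is at least v1.19, falls back to networking.k8s.io/v1beta1 when available, and otherwise uses extensions/v1beta1.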
+*/}} +{{- define "ingress.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19.x" (include "prometheus.kubeVersion" .)) -}} + {{- print "networking.k8s.io/v1" -}} + {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} + {{- print "networking.k8s.io/v1beta1" -}} + {{- else -}} + {{- print "extensions/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* +Return if ingress is stable. +*/}} +{{- define "ingress.isStable" -}} + {{- eq (include "ingress.apiVersion" .) "networking.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "ingress.supportsIngressClassName" -}} + {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} +{{- end -}} +{{/* +Return if ingress supports pathType. +*/}} +{{- define "ingress.supportsPathType" -}} + {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the alertmanager component +*/}} +{{- define "prometheus.serviceAccountName.alertmanager" -}} +{{- if .Values.serviceAccounts.alertmanager.create -}} + {{ default (include "prometheus.alertmanager.fullname" .) .Values.serviceAccounts.alertmanager.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.alertmanager.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the nodeExporter component +*/}} +{{- define "prometheus.serviceAccountName.nodeExporter" -}} +{{- if .Values.serviceAccounts.nodeExporter.create -}} + {{ default (include "prometheus.nodeExporter.fullname" .) .Values.serviceAccounts.nodeExporter.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.nodeExporter.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the pushgateway component +*/}} +{{- define "prometheus.serviceAccountName.pushgateway" -}} +{{- if .Values.serviceAccounts.pushgateway.create -}} + {{ default (include "prometheus.pushgateway.fullname" .) .Values.serviceAccounts.pushgateway.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.pushgateway.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the server component +*/}} +{{- define "prometheus.serviceAccountName.server" -}} +{{- if .Values.serviceAccounts.server.create -}} + {{ default (include "prometheus.server.fullname" .) 
.Values.serviceAccounts.server.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.server.name }} +{{- end -}} +{{- end -}} + +{{/* +Define the prometheus.namespace template if set with forceNamespace or .Release.Namespace is set +*/}} +{{- define "prometheus.namespace" -}} +{{- if .Values.forceNamespace -}} +{{ printf "namespace: %s" .Values.forceNamespace }} +{{- else -}} +{{ printf "namespace: %s" .Release.Namespace }} +{{- end -}} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/clusterrole.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/clusterrole.yaml new file mode 100644 index 0000000..c732ff4 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/clusterrole.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.alertmanager.useClusterRole (not .Values.alertmanager.useExistingRole) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.alertmanager.fullname" . }} +{{- else }} + [] +{{- end }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml new file mode 100644 index 0000000..6f13e98 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.alertmanager.useClusterRole -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{ include "prometheus.namespace" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- if (not .Values.alertmanager.useExistingRole) }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{- else }} + name: {{ .Values.alertmanager.useExistingRole }} +{{- end }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/cm.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/cm.yaml new file mode 100644 index 0000000..cb09bf0 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/cm.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.alertmanager.enabled (and (empty .Values.alertmanager.configMapOverrideName) (empty .Values.alertmanager.configFromSecret)) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +data: +{{- $root := . 
-}} +{{- range $key, $value := .Values.alertmanagerFiles }} + {{- if $key | regexMatch ".*\\.ya?ml$" }} + {{ $key }}: | +{{ toYaml $value | default "{}" | indent 4 }} + {{- else }} + {{ $key }}: {{ toYaml $value | indent 4 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/deploy.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/deploy.yaml new file mode 100644 index 0000000..8633569 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/deploy.yaml @@ -0,0 +1,208 @@ +{{- if and .Values.alertmanager.enabled (not .Values.alertmanager.statefulSet.enabled) -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.alertmanager.deploymentAnnotations }} + annotations: + {{ toYaml .Values.alertmanager.deploymentAnnotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + replicas: {{ .Values.alertmanager.replicaCount }} + {{- if .Values.alertmanager.strategy }} + strategy: +{{ toYaml .Values.alertmanager.strategy | trim | indent 4 }} + {{ if eq .Values.alertmanager.strategy.type "Recreate" }}rollingUpdate: null{{ end }} +{{- end }} + template: + metadata: + {{- if .Values.alertmanager.podAnnotations }} + annotations: + {{ toYaml .Values.alertmanager.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 8 }} + {{- if .Values.alertmanager.podLabels}} + {{ toYaml .Values.alertmanager.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.alertmanager.schedulerName }} + schedulerName: "{{ .Values.alertmanager.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} + {{- if .Values.alertmanager.extraInitContainers }} + initContainers: +{{ toYaml .Values.alertmanager.extraInitContainers | indent 8 }} + {{- end }} +{{- if .Values.alertmanager.priorityClassName }} + priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }} + image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" + env: + {{- range $key, $value := .Values.alertmanager.extraEnv }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + args: + - --config.file=/etc/config/{{ .Values.alertmanager.configFileName }} + - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} + {{- if .Values.alertmanager.service.enableMeshPeer }} + - --cluster.listen-address=0.0.0.0:6783 + - --cluster.advertise-address=[$(POD_IP)]:6783 + {{- else }} + - --cluster.listen-address= + {{- end }} + {{- range $key, $value := .Values.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.alertmanager.baseURL }} + - --web.external-url={{ .Values.alertmanager.baseURL }} + {{- end }} + {{- range .Values.alertmanager.clusterPeers }} + - --cluster.peer={{ . 
}} + {{- end }} + + ports: + - containerPort: 9093 + readinessProbe: + httpGet: + path: {{ .Values.alertmanager.prefixURL }}/-/ready + port: 9093 + {{- if .Values.alertmanager.probeHeaders }} + httpHeaders: + {{- range .Values.alertmanager.probeHeaders }} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: +{{ toYaml .Values.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" + subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.alertmanager.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + + {{- if .Values.configmapReload.alertmanager.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.alertmanager.name }} + image: "{{ .Values.configmapReload.alertmanager.image.repository }}:{{ .Values.configmapReload.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.alertmanager.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9093{{ .Values.alertmanager.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.alertmanager.extraVolumeDirs }} + - --volume-dir={{ . }} + {{- end }} + {{- if .Values.configmapReload.alertmanager.containerPort }} + ports: + - containerPort: {{ .Values.configmapReload.alertmanager.containerPort }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.alertmanager.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.alertmanager.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} + {{- end }} + {{- with .Values.alertmanager.dnsConfig }} + dnsConfig: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.securityContext | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.tolerations }} + tolerations: +{{ toYaml .Values.alertmanager.tolerations | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.affinity }} + affinity: +{{ toYaml .Values.alertmanager.affinity | indent 8 }} + {{- end }} + volumes: + - name: config-volume + {{- if empty .Values.alertmanager.configFromSecret }} + configMap: + name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . 
}}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.alertmanager.configFromSecret }} + {{- end }} + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- range .Values.alertmanager.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.configmapReload.alertmanager.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.alertmanager.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + - name: storage-volume + {{- if .Values.alertmanager.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.alertmanager.persistentVolume.existingClaim }}{{ .Values.alertmanager.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} + {{- else }} + emptyDir: + {{- if .Values.alertmanager.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.alertmanager.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} + {{- end -}} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/headless-svc.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/headless-svc.yaml new file mode 100644 index 0000000..8c402c4 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/headless-svc.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.alertmanager.statefulSet.headless.annotations }} + annotations: +{{ toYaml .Values.alertmanager.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- if .Values.alertmanager.statefulSet.headless.labels }} +{{ toYaml .Values.alertmanager.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }}-headless +{{ include "prometheus.namespace" . | indent 2 }} +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.alertmanager.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: 9093 +{{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - name: meshpeer + port: 6783 + protocol: TCP + targetPort: 6783 +{{- end }} + selector: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/ingress.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/ingress.yaml new file mode 100644 index 0000000..2a7b67c --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/ingress.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.alertmanager.fullname" . }} +{{- $servicePort := .Values.alertmanager.service.servicePort -}} +{{- $ingressPath := .Values.alertmanager.ingress.path -}} +{{- $ingressPathType := .Values.alertmanager.ingress.pathType -}} +{{- $extraPaths := .Values.alertmanager.ingress.extraPaths -}} +apiVersion: {{ template "ingress.apiVersion" . 
}} +kind: Ingress +metadata: +{{- if .Values.alertmanager.ingress.annotations }} + annotations: +{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- range $key, $value := .Values.alertmanager.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.alertmanager.ingress.ingressClassName }} + ingressClassName: {{ .Values.alertmanager.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.alertmanager.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} +{{- if .Values.alertmanager.ingress.tls }} + tls: +{{ toYaml .Values.alertmanager.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/netpol.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/netpol.yaml new file mode 100644 index 0000000..e44ade6 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/netpol.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.alertmanager.enabled .Values.networkPolicy.enabled -}} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + ingress: + - from: + - podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 12 }} + - ports: + - port: 9093 +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/pdb.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/pdb.yaml new file mode 100644 index 0000000..41a92f3 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.alertmanager.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.alertmanager.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.alertmanager.labels" . | nindent 6 }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/psp.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/psp.yaml new file mode 100644 index 0000000..64fb130 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/psp.yaml @@ -0,0 +1,46 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . 
}} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + annotations: +{{- if .Values.alertmanager.podSecurityPolicy.annotations }} +{{ toYaml .Values.alertmanager.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'secret' + allowedHostPaths: + - pathPrefix: /etc + readOnly: true + - pathPrefix: {{ .Values.alertmanager.persistentVolume.mountPath }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: true +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/pvc.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/pvc.yaml new file mode 100644 index 0000000..160e296 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/pvc.yaml @@ -0,0 +1,37 @@ +{{- if not .Values.alertmanager.statefulSet.enabled -}} +{{- if and .Values.alertmanager.enabled .Values.alertmanager.persistentVolume.enabled -}} +{{- if not .Values.alertmanager.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.alertmanager.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + accessModes: +{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 4 }} +{{- if .Values.alertmanager.persistentVolume.storageClass }} +{{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.alertmanager.persistentVolume.volumeBindingMode }} + volumeBindingMode: "{{ .Values.alertmanager.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.alertmanager.persistentVolume.size }}" +{{- if .Values.alertmanager.persistentVolume.selector }} + selector: + {{- toYaml .Values.alertmanager.persistentVolume.selector | nindent 4 }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/role.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/role.yaml new file mode 100644 index 0000000..ce60eaf --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/role.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create (eq .Values.alertmanager.useClusterRole false) (not .Values.alertmanager.useExistingRole) -}} +{{- range $.Values.alertmanager.namespaces }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: Role +metadata: + labels: + {{- include "prometheus.alertmanager.labels" $ | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" $ }} + namespace: {{ . 
}} +rules: +{{- if $.Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.alertmanager.fullname" $ }} +{{- else }} + [] +{{- end }} +{{- end }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/rolebinding.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/rolebinding.yaml new file mode 100644 index 0000000..906d652 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create (eq .Values.alertmanager.useClusterRole false) -}} +{{ range $.Values.alertmanager.namespaces }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: RoleBinding +metadata: + labels: + {{- include "prometheus.alertmanager.labels" $ | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" $ }} + namespace: {{ . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.alertmanager" $ }} +{{ include "prometheus.namespace" $ | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role +{{- if (not $.Values.alertmanager.useExistingRole) }} + name: {{ template "prometheus.alertmanager.fullname" $ }} +{{- else }} + name: {{ $.Values.alertmanager.useExistingRole }} +{{- end }} +{{- end }} +{{ end }} diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/service.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/service.yaml new file mode 100644 index 0000000..9edc9ac --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/service.yaml @@ -0,0 +1,53 @@ +{{- if .Values.alertmanager.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.alertmanager.service.annotations }} + annotations: +{{ toYaml .Values.alertmanager.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- if .Values.alertmanager.service.labels }} +{{ toYaml .Values.alertmanager.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: +{{- if .Values.alertmanager.service.clusterIP }} + clusterIP: {{ .Values.alertmanager.service.clusterIP }} +{{- end }} +{{- if .Values.alertmanager.service.externalIPs }} + externalIPs: +{{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.alertmanager.service.servicePort }} + protocol: TCP + targetPort: 9093 + {{- if .Values.alertmanager.service.nodePort }} + nodePort: {{ .Values.alertmanager.service.nodePort }} + {{- end }} +{{- if .Values.alertmanager.service.enableMeshPeer }} + - name: meshpeer + port: 6783 + protocol: TCP + targetPort: 6783 +{{- end }} + selector: + {{- include "prometheus.alertmanager.matchLabels" . 
| nindent 4 }} +{{- if .Values.alertmanager.service.sessionAffinity }} + sessionAffinity: {{ .Values.alertmanager.service.sessionAffinity }} +{{- end }} + type: "{{ .Values.alertmanager.service.type }}" +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/serviceaccount.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/serviceaccount.yaml new file mode 100644 index 0000000..a5d996a --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.alertmanager.enabled .Values.serviceAccounts.alertmanager.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{ include "prometheus.namespace" . | indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.alertmanager.annotations | indent 4 }} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/alertmanager/sts.yaml b/deploy/prod/charts/prometheus/templates/alertmanager/sts.yaml new file mode 100644 index 0000000..1baef4b --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/alertmanager/sts.yaml @@ -0,0 +1,188 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: +{{- if .Values.alertmanager.statefulSet.annotations }} + annotations: + {{ toYaml .Values.alertmanager.statefulSet.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + {{- if .Values.alertmanager.statefulSet.labels}} + {{ toYaml .Values.alertmanager.statefulSet.labels | nindent 4 }} + {{- end}} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + serviceName: {{ template "prometheus.alertmanager.fullname" . }}-headless + selector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + replicas: {{ .Values.alertmanager.replicaCount }} + podManagementPolicy: {{ .Values.alertmanager.statefulSet.podManagementPolicy }} + template: + metadata: + {{- if .Values.alertmanager.podAnnotations }} + annotations: + {{ toYaml .Values.alertmanager.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 8 }} + {{- if .Values.alertmanager.podLabels}} + {{ toYaml .Values.alertmanager.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.alertmanager.affinity }} + affinity: +{{ toYaml .Values.alertmanager.affinity | indent 8 }} +{{- end }} +{{- if .Values.alertmanager.schedulerName }} + schedulerName: "{{ .Values.alertmanager.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{- if .Values.alertmanager.priorityClassName }} + priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }} + image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" + env: + {{- range $key, $value := .Values.alertmanager.extraEnv }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + args: + - --config.file=/etc/config/alertmanager.yml + - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} + {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - --cluster.advertise-address=[$(POD_IP)]:6783 + - --cluster.listen-address=0.0.0.0:6783 + {{- range $n := until (.Values.alertmanager.replicaCount | int) }} + - --cluster.peer={{ template "prometheus.alertmanager.fullname" $ }}-{{ $n }}.{{ template "prometheus.alertmanager.fullname" $ }}-headless:6783 + {{- end }} + {{- else }} + - --cluster.listen-address= + {{- end }} + {{- range $key, $value := .Values.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.alertmanager.baseURL }} + - --web.external-url={{ .Values.alertmanager.baseURL }} + {{- end }} + + ports: + - containerPort: 9093 + {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - containerPort: 6783 + {{- end }} + readinessProbe: + httpGet: + path: {{ .Values.alertmanager.prefixURL }}/#/status + port: 9093 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: +{{ toYaml .Values.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" + subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.configmapReload.alertmanager.enabled }} + - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.alertmanager.name }} + image: "{{ .Values.configmapReload.alertmanager.image.repository }}:{{ .Values.configmapReload.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.alertmanager.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://localhost:9093{{ .Values.alertmanager.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.configmapReload.alertmanager.port }} + ports: + - containerPort: {{ .Values.configmapReload.alertmanager.port }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.securityContext | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.tolerations }} + tolerations: +{{ toYaml .Values.alertmanager.tolerations | indent 8 }} + {{- end }} + volumes: + - name: config-volume + {{- if empty .Values.alertmanager.configFromSecret }} + configMap: + name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.alertmanager.configFromSecret }} + {{- end }} + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} +{{- if .Values.alertmanager.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + {{- if .Values.alertmanager.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 10 }} + {{- end }} + spec: + accessModes: +{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 10 }} + resources: + requests: + storage: "{{ .Values.alertmanager.persistentVolume.size }}" + {{- if .Values.server.persistentVolume.storageClass }} + {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: storage-volume + emptyDir: + {{- if .Values.alertmanager.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.alertmanager.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} +{{- end }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/node-exporter/daemonset.yaml b/deploy/prod/charts/prometheus/templates/node-exporter/daemonset.yaml new file mode 100644 index 0000000..d1d5cf0 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/node-exporter/daemonset.yaml @@ -0,0 +1,150 @@ +{{- if .Values.nodeExporter.enabled -}} +apiVersion: {{ template "prometheus.daemonset.apiVersion" . 
}} +kind: DaemonSet +metadata: +{{- if .Values.nodeExporter.deploymentAnnotations }} + annotations: +{{ toYaml .Values.nodeExporter.deploymentAnnotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + name: {{ template "prometheus.nodeExporter.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + matchLabels: + {{- include "prometheus.nodeExporter.matchLabels" . | nindent 6 }} + {{- if .Values.nodeExporter.updateStrategy }} + updateStrategy: +{{ toYaml .Values.nodeExporter.updateStrategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.nodeExporter.podAnnotations }} + annotations: +{{ toYaml .Values.nodeExporter.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 8 }} +{{- if .Values.nodeExporter.pod.labels }} +{{ toYaml .Values.nodeExporter.pod.labels | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ template "prometheus.serviceAccountName.nodeExporter" . }} + {{- if .Values.nodeExporter.extraInitContainers }} + initContainers: +{{ toYaml .Values.nodeExporter.extraInitContainers | indent 8 }} + {{- end }} +{{- if .Values.nodeExporter.priorityClassName }} + priorityClassName: "{{ .Values.nodeExporter.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.nodeExporter.name }} + image: "{{ .Values.nodeExporter.image.repository }}:{{ .Values.nodeExporter.image.tag }}" + imagePullPolicy: "{{ .Values.nodeExporter.image.pullPolicy }}" + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + {{- if .Values.nodeExporter.hostRootfs }} + - --path.rootfs=/host/root + {{- end }} + {{- if .Values.nodeExporter.hostNetwork }} + - --web.listen-address=:{{ .Values.nodeExporter.service.hostPort }} + {{- end }} + {{- range $key, $value := .Values.nodeExporter.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + ports: + - name: metrics + {{- if .Values.nodeExporter.hostNetwork }} + containerPort: {{ .Values.nodeExporter.service.hostPort }} + {{- else }} + containerPort: 9100 + {{- end }} + hostPort: {{ .Values.nodeExporter.service.hostPort }} + resources: +{{ toYaml .Values.nodeExporter.resources | indent 12 }} + {{- if .Values.nodeExporter.container.securityContext }} + securityContext: +{{ toYaml .Values.nodeExporter.container.securityContext | indent 12 }} + {{- end }} + volumeMounts: + - name: proc + mountPath: /host/proc + readOnly: true + - name: sys + mountPath: /host/sys + readOnly: true + {{- if .Values.nodeExporter.hostRootfs }} + - name: root + mountPath: /host/root + mountPropagation: HostToContainer + readOnly: true + {{- end }} + {{- range .Values.nodeExporter.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- if .mountPropagation }} + mountPropagation: {{ .mountPropagation }} + {{- end }} + {{- end }} + {{- range .Values.nodeExporter.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.nodeExporter.hostNetwork }} + hostNetwork: true + {{- end }} + {{- if .Values.nodeExporter.hostPID }} + hostPID: true + {{- end }} + {{- if .Values.nodeExporter.tolerations }} + tolerations: +{{ toYaml .Values.nodeExporter.tolerations | indent 8 }} + {{- end }} + 
{{- if .Values.nodeExporter.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeExporter.nodeSelector | indent 8 }} + {{- end }} + {{- with .Values.nodeExporter.dnsConfig }} + dnsConfig: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.nodeExporter.securityContext }} + securityContext: +{{ toYaml .Values.nodeExporter.securityContext | indent 8 }} + {{- end }} + volumes: + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys + {{- if .Values.nodeExporter.hostRootfs }} + - name: root + hostPath: + path: / + {{- end }} + {{- range .Values.nodeExporter.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.nodeExporter.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/node-exporter/psp.yaml b/deploy/prod/charts/prometheus/templates/node-exporter/psp.yaml new file mode 100644 index 0000000..bd9c73b --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/node-exporter/psp.yaml @@ -0,0 +1,55 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + annotations: +{{- if .Values.nodeExporter.podSecurityPolicy.annotations }} +{{ toYaml .Values.nodeExporter.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'hostPath' + - 'secret' + allowedHostPaths: + - pathPrefix: /proc + readOnly: true + - pathPrefix: /sys + readOnly: true + - pathPrefix: / + readOnly: true + {{- range .Values.nodeExporter.extraHostPathMounts }} + - pathPrefix: {{ .hostPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: {{ .Values.nodeExporter.hostNetwork }} + hostPID: {{ .Values.nodeExporter.hostPID }} + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + hostPorts: + - min: 1 + max: 65535 +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/node-exporter/role.yaml b/deploy/prod/charts/prometheus/templates/node-exporter/role.yaml new file mode 100644 index 0000000..d8ef3ed --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/node-exporter/role.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} +{{- if or (default .Values.nodeExporter.podSecurityPolicy.enabled false) (.Values.podSecurityPolicy.enabled) }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} +{{ include "prometheus.namespace" . | indent 2 }} +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "prometheus.nodeExporter.fullname" . 
}} +{{- end }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/node-exporter/rolebinding.yaml b/deploy/prod/charts/prometheus/templates/node-exporter/rolebinding.yaml new file mode 100644 index 0000000..06914b7 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/node-exporter/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} +{{ include "prometheus.namespace" . | indent 2 }} +roleRef: + kind: Role + name: {{ template "prometheus.nodeExporter.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} +{{ include "prometheus.namespace" . | indent 2 }} +{{- end }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/node-exporter/serviceaccount.yaml b/deploy/prod/charts/prometheus/templates/node-exporter/serviceaccount.yaml new file mode 100644 index 0000000..0cf91af --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/node-exporter/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.nodeExporter.enabled .Values.serviceAccounts.nodeExporter.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} +{{ include "prometheus.namespace" . | indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.nodeExporter.annotations | indent 4 }} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/node-exporter/svc.yaml b/deploy/prod/charts/prometheus/templates/node-exporter/svc.yaml new file mode 100644 index 0000000..26d1eaa --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/node-exporter/svc.yaml @@ -0,0 +1,47 @@ +{{- if .Values.nodeExporter.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.nodeExporter.service.annotations }} + annotations: +{{ toYaml .Values.nodeExporter.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} +{{- if .Values.nodeExporter.service.labels }} +{{ toYaml .Values.nodeExporter.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.nodeExporter.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: +{{- if .Values.nodeExporter.service.clusterIP }} + clusterIP: {{ .Values.nodeExporter.service.clusterIP }} +{{- end }} +{{- if .Values.nodeExporter.service.externalIPs }} + externalIPs: +{{ toYaml .Values.nodeExporter.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.nodeExporter.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.nodeExporter.service.loadBalancerIP }} +{{- end }} +{{- if .Values.nodeExporter.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.nodeExporter.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: metrics + {{- if .Values.nodeExporter.hostNetwork }} + port: {{ .Values.nodeExporter.service.hostPort }} + protocol: TCP + targetPort: {{ .Values.nodeExporter.service.hostPort }} + {{- else }} + port: {{ .Values.nodeExporter.service.servicePort }} + protocol: TCP + targetPort: 9100 + {{- end }} + selector: + {{- include "prometheus.nodeExporter.matchLabels" . | nindent 4 }} + type: "{{ .Values.nodeExporter.service.type }}" +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/pushgateway/clusterrole.yaml b/deploy/prod/charts/prometheus/templates/pushgateway/clusterrole.yaml new file mode 100644 index 0000000..76ecf05 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/pushgateway/clusterrole.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.pushgateway.fullname" . }} +{{- else }} + [] +{{- end }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml b/deploy/prod/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml new file mode 100644 index 0000000..15770ee --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.pushgateway" . }} +{{ include "prometheus.namespace" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.pushgateway.fullname" . }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/pushgateway/deploy.yaml b/deploy/prod/charts/prometheus/templates/pushgateway/deploy.yaml new file mode 100644 index 0000000..ffdbfcc --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/pushgateway/deploy.yaml @@ -0,0 +1,119 @@ +{{- if .Values.pushgateway.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.pushgateway.deploymentAnnotations }} + annotations: + {{ toYaml .Values.pushgateway.deploymentAnnotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . 
}} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + matchLabels: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }} + replicas: {{ .Values.pushgateway.replicaCount }} + {{- if .Values.pushgateway.strategy }} + strategy: +{{ toYaml .Values.pushgateway.strategy | trim | indent 4 }} + {{ if eq .Values.pushgateway.strategy.type "Recreate" }}rollingUpdate: null{{ end }} +{{- end }} + template: + metadata: + {{- if .Values.pushgateway.podAnnotations }} + annotations: + {{ toYaml .Values.pushgateway.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 8 }} + {{- if .Values.pushgateway.podLabels }} + {{ toYaml .Values.pushgateway.podLabels | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "prometheus.serviceAccountName.pushgateway" . }} + {{- if .Values.pushgateway.extraInitContainers }} + initContainers: +{{ toYaml .Values.pushgateway.extraInitContainers | indent 8 }} + {{- end }} +{{- if .Values.pushgateway.priorityClassName }} + priorityClassName: "{{ .Values.pushgateway.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.pushgateway.name }} + image: "{{ .Values.pushgateway.image.repository }}:{{ .Values.pushgateway.image.tag }}" + imagePullPolicy: "{{ .Values.pushgateway.image.pullPolicy }}" + args: + {{- range $key, $value := .Values.pushgateway.extraArgs }} + {{- $stringvalue := toString $value }} + {{- if eq $stringvalue "true" }} + - --{{ $key }} + {{- else }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + ports: + - containerPort: 9091 + livenessProbe: + httpGet: + {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} + path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/healthy + {{- else }} + path: /-/healthy + {{- end }} + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + readinessProbe: + httpGet: + {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} + path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/ready + {{- else }} + path: /-/ready + {{- end }} + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + resources: +{{ toYaml .Values.pushgateway.resources | indent 12 }} + {{- if .Values.pushgateway.persistentVolume.enabled }} + volumeMounts: + - name: storage-volume + mountPath: "{{ .Values.pushgateway.persistentVolume.mountPath }}" + subPath: "{{ .Values.pushgateway.persistentVolume.subPath }}" + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.nodeSelector }} + nodeSelector: +{{ toYaml .Values.pushgateway.nodeSelector | indent 8 }} + {{- end }} + {{- with .Values.pushgateway.dnsConfig }} + dnsConfig: +{{ toYaml . 
| indent 8 }} + {{- end }} + {{- if .Values.pushgateway.securityContext }} + securityContext: +{{ toYaml .Values.pushgateway.securityContext | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.tolerations }} + tolerations: +{{ toYaml .Values.pushgateway.tolerations | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.affinity }} + affinity: +{{ toYaml .Values.pushgateway.affinity | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.persistentVolume.enabled }} + volumes: + - name: storage-volume + persistentVolumeClaim: + claimName: {{ if .Values.pushgateway.persistentVolume.existingClaim }}{{ .Values.pushgateway.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.pushgateway.fullname" . }}{{- end }} + {{- end -}} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/pushgateway/ingress.yaml b/deploy/prod/charts/prometheus/templates/pushgateway/ingress.yaml new file mode 100644 index 0000000..2ff72ab --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/pushgateway/ingress.yaml @@ -0,0 +1,54 @@ +{{- if and .Values.pushgateway.enabled .Values.pushgateway.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.pushgateway.fullname" . }} +{{- $servicePort := .Values.pushgateway.service.servicePort -}} +{{- $ingressPath := .Values.pushgateway.ingress.path -}} +{{- $ingressPathType := .Values.pushgateway.ingress.pathType -}} +{{- $extraPaths := .Values.pushgateway.ingress.extraPaths -}} +apiVersion: {{ template "ingress.apiVersion" . }} +kind: Ingress +metadata: +{{- if .Values.pushgateway.ingress.annotations }} + annotations: +{{ toYaml .Values.pushgateway.ingress.annotations | indent 4}} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.pushgateway.ingress.ingressClassName }} + ingressClassName: {{ .Values.pushgateway.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.pushgateway.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} +{{- if .Values.pushgateway.ingress.tls }} + tls: +{{ toYaml .Values.pushgateway.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/deploy/prod/charts/prometheus/templates/pushgateway/netpol.yaml b/deploy/prod/charts/prometheus/templates/pushgateway/netpol.yaml new file mode 100644 index 0000000..c8d1fb3 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/pushgateway/netpol.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.pushgateway.enabled .Values.networkPolicy.enabled -}} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . 
}} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }} + ingress: + - from: + - podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 12 }} + - ports: + - port: 9091 +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/pushgateway/pdb.yaml b/deploy/prod/charts/prometheus/templates/pushgateway/pdb.yaml new file mode 100644 index 0000000..50beb48 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/pushgateway/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.pushgateway.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.pushgateway.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.pushgateway.labels" . | nindent 6 }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/pushgateway/psp.yaml b/deploy/prod/charts/prometheus/templates/pushgateway/psp.yaml new file mode 100644 index 0000000..1ca3267 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/pushgateway/psp.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.pushgateway.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + annotations: +{{- if .Values.pushgateway.podSecurityPolicy.annotations }} +{{ toYaml .Values.pushgateway.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'persistentVolumeClaim' + - 'secret' + allowedHostPaths: + - pathPrefix: {{ .Values.pushgateway.persistentVolume.mountPath }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: true +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/pushgateway/pvc.yaml b/deploy/prod/charts/prometheus/templates/pushgateway/pvc.yaml new file mode 100644 index 0000000..d5d64dd --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/pushgateway/pvc.yaml @@ -0,0 +1,31 @@ +{{- if .Values.pushgateway.persistentVolume.enabled -}} +{{- if not .Values.pushgateway.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.pushgateway.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.pushgateway.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: + accessModes: +{{ toYaml .Values.pushgateway.persistentVolume.accessModes | indent 4 }} +{{- if .Values.pushgateway.persistentVolume.storageClass }} +{{- if (eq "-" .Values.pushgateway.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.pushgateway.persistentVolume.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.pushgateway.persistentVolume.volumeBindingMode }} + volumeBindingMode: "{{ .Values.pushgateway.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.pushgateway.persistentVolume.size }}" +{{- end -}} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/pushgateway/service.yaml b/deploy/prod/charts/prometheus/templates/pushgateway/service.yaml new file mode 100644 index 0000000..f05f17c --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/pushgateway/service.yaml @@ -0,0 +1,41 @@ +{{- if .Values.pushgateway.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.pushgateway.service.annotations }} + annotations: +{{ toYaml .Values.pushgateway.service.annotations | indent 4}} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +{{- if .Values.pushgateway.service.labels }} +{{ toYaml .Values.pushgateway.service.labels | indent 4}} +{{- end }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: +{{- if .Values.pushgateway.service.clusterIP }} + clusterIP: {{ .Values.pushgateway.service.clusterIP }} +{{- end }} +{{- if .Values.pushgateway.service.externalIPs }} + externalIPs: +{{ toYaml .Values.pushgateway.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.pushgateway.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.pushgateway.service.loadBalancerIP }} +{{- end }} +{{- if .Values.pushgateway.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.pushgateway.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.pushgateway.service.servicePort }} + protocol: TCP + targetPort: 9091 + selector: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 4 }} + type: "{{ .Values.pushgateway.service.type }}" +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/pushgateway/serviceaccount.yaml b/deploy/prod/charts/prometheus/templates/pushgateway/serviceaccount.yaml new file mode 100644 index 0000000..8c0b876 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/pushgateway/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.pushgateway.enabled .Values.serviceAccounts.pushgateway.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.pushgateway" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.pushgateway.annotations | indent 4 }} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/pushgateway/vpa.yaml b/deploy/prod/charts/prometheus/templates/pushgateway/vpa.yaml new file mode 100644 index 0000000..0ac54f9 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/pushgateway/vpa.yaml @@ -0,0 +1,20 @@ +{{- if .Values.pushgateway.enabled -}} +{{- if .Values.pushgateway.verticalAutoscaler.enabled -}} +apiVersion: autoscaling.k8s.io/v1beta2 +kind: VerticalPodAutoscaler +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }}-vpa +{{ include "prometheus.namespace" . | indent 2 }} +spec: + targetRef: + apiVersion: "apps/v1" + kind: Deployment + name: {{ template "prometheus.pushgateway.fullname" . }} + updatePolicy: + updateMode: {{ .Values.pushgateway.verticalAutoscaler.updateMode | default "Off" | quote }} + resourcePolicy: + containerPolicies: {{ .Values.pushgateway.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} +{{- end -}} {{/* if .Values.pushgateway.verticalAutoscaler.enabled */}} +{{- end -}} {{/* .Values.pushgateway.enabled */}} diff --git a/deploy/prod/charts/prometheus/templates/server/clusterrole.yaml b/deploy/prod/charts/prometheus/templates/server/clusterrole.yaml new file mode 100644 index 0000000..2520235 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/clusterrole.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.server.enabled .Values.rbac.create (empty .Values.server.useExistingClusterRoleName) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.server.fullname" . }} +{{- end }} + - apiGroups: + - "" + resources: + - nodes + - nodes/proxy + - nodes/metrics + - services + - endpoints + - pods + - ingresses + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + - "networking.k8s.io" + resources: + - ingresses/status + - ingresses + verbs: + - get + - list + - watch + - nonResourceURLs: + - "/metrics" + verbs: + - get +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/server/clusterrolebinding.yaml b/deploy/prod/charts/prometheus/templates/server/clusterrolebinding.yaml new file mode 100644 index 0000000..5a79611 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.server.enabled .Values.rbac.create (empty .Values.server.namespaces) (empty .Values.server.useExistingClusterRoleName) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" . }} +{{ include "prometheus.namespace" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.server.fullname" . 
}} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/server/cm.yaml b/deploy/prod/charts/prometheus/templates/server/cm.yaml new file mode 100644 index 0000000..a0a813a --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/cm.yaml @@ -0,0 +1,85 @@ +{{- if .Values.server.enabled -}} +{{- if (empty .Values.server.configMapOverrideName) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +data: +{{- $root := . -}} +{{- range $key, $value := .Values.ruleFiles }} + {{ $key }}: {{- toYaml $value | indent 2 }} +{{- end }} +{{- range $key, $value := .Values.serverFiles }} + {{ $key }}: | +{{- if eq $key "prometheus.yml" }} + global: +{{ $root.Values.server.global | toYaml | trimSuffix "\n" | indent 6 }} +{{- if $root.Values.server.remoteWrite }} + remote_write: +{{ $root.Values.server.remoteWrite | toYaml | indent 4 }} +{{- end }} +{{- if $root.Values.server.remoteRead }} + remote_read: +{{ $root.Values.server.remoteRead | toYaml | indent 4 }} +{{- end }} +{{- end }} +{{- if eq $key "alerts" }} +{{- if and (not (empty $value)) (empty $value.groups) }} + groups: +{{- range $ruleKey, $ruleValue := $value }} + - name: {{ $ruleKey -}}.rules + rules: +{{ $ruleValue | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} +{{- else }} +{{ toYaml $value | indent 4 }} +{{- end }} +{{- else }} +{{ toYaml $value | default "{}" | indent 4 }} +{{- end }} +{{- if eq $key "prometheus.yml" -}} +{{- if $root.Values.extraScrapeConfigs }} +{{ tpl $root.Values.extraScrapeConfigs $root | indent 4 }} +{{- end -}} +{{- if or ($root.Values.alertmanager.enabled) ($root.Values.server.alertmanagers) }} + alerting: +{{- if $root.Values.alertRelabelConfigs }} +{{ $root.Values.alertRelabelConfigs | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} + alertmanagers: +{{- if $root.Values.server.alertmanagers }} +{{ toYaml $root.Values.server.alertmanagers | indent 8 }} +{{- else }} + - kubernetes_sd_configs: + - role: pod + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if $root.Values.alertmanager.prefixURL }} + path_prefix: {{ $root.Values.alertmanager.prefixURL }} + {{- end }} + relabel_configs: + - source_labels: [__meta_kubernetes_namespace] + regex: {{ $root.Release.Namespace }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_app] + regex: {{ template "prometheus.name" $root }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_component] + regex: alertmanager + action: keep + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_probe] + regex: {{ index $root.Values.alertmanager.podAnnotations "prometheus.io/probe" | default ".*" }} + action: keep + - source_labels: [__meta_kubernetes_pod_container_port_number] + regex: "9093" + action: keep +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/server/deploy.yaml b/deploy/prod/charts/prometheus/templates/server/deploy.yaml new file mode 100644 index 0000000..eb86e90 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/deploy.yaml @@ -0,0 +1,324 @@ +{{- if .Values.server.enabled -}} +{{- if not .Values.server.statefulSet.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . 
}} +kind: Deployment +metadata: +{{- if .Values.server.deploymentAnnotations }} + annotations: + {{ toYaml .Values.server.deploymentAnnotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + {{- if .Values.server.strategy }} + strategy: +{{ toYaml .Values.server.strategy | trim | indent 4 }} + {{ if eq .Values.server.strategy.type "Recreate" }}rollingUpdate: null{{ end }} +{{- end }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: + {{ toYaml .Values.server.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} +{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} + {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} + enableServiceLinks: true + {{- else }} + enableServiceLinks: false + {{- end }} +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --volume-dir={{ . }} + {{- end }} + {{- if .Values.configmapReload.prometheus.containerPort }} + ports: + - containerPort: {{ .Values.configmapReload.prometheus.containerPort }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + + - name: {{ template "prometheus.name" . 
}}-{{ .Values.server.name }} + image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}" + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.defaultFlagsOverride }} + {{ toYaml .Values.server.defaultFlagsOverride | nindent 12}} + {{- else }} + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + {{- if .Values.server.storagePath }} + - --storage.tsdb.path={{ .Values.server.storagePath }} + {{- else }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + {{- end }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . }} + {{- end }} + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.server.prefixURL }} + - --web.route-prefix={{ .Values.server.prefixURL }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + {{- end }} + ports: + - containerPort: 9090 + {{- if .Values.server.hostPort }} + hostPort: {{ .Values.server.hostPort }} + {{- end }} + readinessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- if .Values.server.probeHeaders }} + httpHeaders: + {{- range .Values.server.probeHeaders}} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} + successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} + livenessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- if .Values.server.probeHeaders }} + httpHeaders: + {{- range .Values.server.probeHeaders}} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} + successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} + {{- if .Values.server.startupProbe.enabled }} + startupProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- if .Values.server.probeHeaders }} + httpHeaders: + {{- range .Values.server.probeHeaders}} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + failureThreshold: {{ .Values.server.startupProbe.failureThreshold }} + periodSeconds: {{ .Values.server.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.server.startupProbe.timeoutSeconds }} + {{- end }} + resources: +{{ 
toYaml .Values.server.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.server.containerSecurityContext }} + securityContext: + {{- toYaml .Values.server.containerSecurityContext | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- range $name, $spec := .Values.server.sidecarContainers }} + - name: {{ $name }} + {{- if kindIs "string" $spec }} + {{- tpl $spec $ | nindent 10 }} + {{- else }} + {{- toYaml $spec | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + hostNetwork: {{ .Values.server.hostNetwork }} + {{- if .Values.server.dnsPolicy }} + dnsPolicy: {{ .Values.server.dnsPolicy }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.hostAliases }} + hostAliases: +{{ toYaml .Values.server.hostAliases | indent 8 }} + {{- end }} + {{- if .Values.server.dnsConfig }} + dnsConfig: +{{ toYaml .Values.server.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.server.securityContext }} + securityContext: +{{ toYaml .Values.server.securityContext | indent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + {{- if empty .Values.server.configFromSecret }} + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.server.configFromSecret }} + {{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . 
}} + {{- end }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} + - name: storage-volume + {{- if .Values.server.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + emptyDir: + {{- if .Values.server.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/server/headless-svc.yaml b/deploy/prod/charts/prometheus/templates/server/headless-svc.yaml new file mode 100644 index 0000000..d519f4e --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/headless-svc.yaml @@ -0,0 +1,37 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.statefulSet.headless.annotations }} + annotations: +{{ toYaml .Values.server.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.statefulSet.headless.labels }} +{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }}-headless +{{ include "prometheus.namespace" . | indent 2 }} +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.server.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.statefulSet.headless.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.statefulSet.headless.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- if .Values.server.statefulSet.headless.gRPC.nodePort }} + nodePort: {{ .Values.server.statefulSet.headless.gRPC.nodePort }} + {{- end }} + {{- end }} + + selector: + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- end -}} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/server/ingress.yaml b/deploy/prod/charts/prometheus/templates/server/ingress.yaml new file mode 100644 index 0000000..000f39c --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/ingress.yaml @@ -0,0 +1,59 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.server.fullname" . }} +{{- $servicePort := .Values.server.service.servicePort -}} +{{- $ingressPath := .Values.server.ingress.path -}} +{{- $ingressPathType := .Values.server.ingress.pathType -}} +{{- $extraPaths := .Values.server.ingress.extraPaths -}} +apiVersion: {{ template "ingress.apiVersion" . }} +kind: Ingress +metadata: +{{- if .Values.server.ingress.annotations }} + annotations: +{{ toYaml .Values.server.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . 
| nindent 4 }} +{{- range $key, $value := .Values.server.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.server.ingress.ingressClassName }} + ingressClassName: {{ .Values.server.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.server.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} +{{- if .Values.server.ingress.tls }} + tls: +{{ toYaml .Values.server.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/server/netpol.yaml b/deploy/prod/charts/prometheus/templates/server/netpol.yaml new file mode 100644 index 0000000..c8870e9 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/netpol.yaml @@ -0,0 +1,18 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + ingress: + - ports: + - port: 9090 +{{- end }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/server/pdb.yaml b/deploy/prod/charts/prometheus/templates/server/pdb.yaml new file mode 100644 index 0000000..364cb5b --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.server.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.server.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.server.labels" . | nindent 6 }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/server/psp.yaml b/deploy/prod/charts/prometheus/templates/server/psp.yaml new file mode 100644 index 0000000..e2b885f --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/psp.yaml @@ -0,0 +1,51 @@ +{{- if and .Values.server.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} + labels: + {{- include "prometheus.server.labels" . 
| nindent 4 }} + annotations: +{{- if .Values.server.podSecurityPolicy.annotations }} +{{ toYaml .Values.server.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + allowedCapabilities: + - 'CHOWN' + volumes: + - 'configMap' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'secret' + - 'hostPath' + allowedHostPaths: + - pathPrefix: /etc + readOnly: true + - pathPrefix: {{ .Values.server.persistentVolume.mountPath }} + {{- range .Values.server.extraHostPathMounts }} + - pathPrefix: {{ .hostPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/server/pvc.yaml b/deploy/prod/charts/prometheus/templates/server/pvc.yaml new file mode 100644 index 0000000..a735536 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/pvc.yaml @@ -0,0 +1,39 @@ +{{- if .Values.server.enabled -}} +{{- if not .Values.server.statefulSet.enabled -}} +{{- if .Values.server.persistentVolume.enabled -}} +{{- if not .Values.server.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }} +{{- if .Values.server.persistentVolume.storageClass }} +{{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.server.persistentVolume.volumeBindingMode }} + volumeBindingMode: "{{ .Values.server.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" +{{- if .Values.server.persistentVolume.selector }} + selector: + {{- toYaml .Values.server.persistentVolume.selector | nindent 4 }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/server/rolebinding.yaml b/deploy/prod/charts/prometheus/templates/server/rolebinding.yaml new file mode 100644 index 0000000..93ce3ee --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.server.enabled .Values.rbac.create .Values.server.useExistingClusterRoleName .Values.server.namespaces -}} +{{ range $.Values.server.namespaces -}} +--- +apiVersion: {{ template "rbac.apiVersion" $ }} +kind: RoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" $ | nindent 4 }} + name: {{ template "prometheus.server.fullname" $ }} + namespace: {{ . 
}} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" $ }} +{{ include "prometheus.namespace" $ | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ $.Values.server.useExistingClusterRoleName }} +{{ end -}} +{{ end -}} diff --git a/deploy/prod/charts/prometheus/templates/server/service.yaml b/deploy/prod/charts/prometheus/templates/server/service.yaml new file mode 100644 index 0000000..01c5a4a --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/service.yaml @@ -0,0 +1,60 @@ +{{- if and .Values.server.enabled .Values.server.service.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.service.annotations }} + annotations: +{{ toYaml .Values.server.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.service.labels }} +{{ toYaml .Values.server.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: +{{- if .Values.server.service.clusterIP }} + clusterIP: {{ .Values.server.service.clusterIP }} +{{- end }} +{{- if .Values.server.service.externalIPs }} + externalIPs: +{{ toYaml .Values.server.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.server.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.server.service.loadBalancerIP }} +{{- end }} +{{- if .Values.server.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.server.service.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.service.nodePort }} + nodePort: {{ .Values.server.service.nodePort }} + {{- end }} + {{- if .Values.server.service.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.service.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- if .Values.server.service.gRPC.nodePort }} + nodePort: {{ .Values.server.service.gRPC.nodePort }} + {{- end }} + {{- end }} + selector: + {{- if and .Values.server.statefulSet.enabled .Values.server.service.statefulsetReplica.enabled }} + statefulset.kubernetes.io/pod-name: {{ template "prometheus.server.fullname" . }}-{{ .Values.server.service.statefulsetReplica.replica }} + {{- else -}} + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- if .Values.server.service.sessionAffinity }} + sessionAffinity: {{ .Values.server.service.sessionAffinity }} +{{- end }} + {{- end }} + type: "{{ .Values.server.service.type }}" +{{- end -}} diff --git a/deploy/prod/charts/prometheus/templates/server/serviceaccount.yaml b/deploy/prod/charts/prometheus/templates/server/serviceaccount.yaml new file mode 100644 index 0000000..9c0502a --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.serviceAccounts.server.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.server" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.server.annotations | indent 4 }} +{{- end }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/server/sts.yaml b/deploy/prod/charts/prometheus/templates/server/sts.yaml new file mode 100644 index 0000000..24a9d92 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/sts.yaml @@ -0,0 +1,302 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: +{{- if .Values.server.statefulSet.annotations }} + annotations: + {{ toYaml .Values.server.statefulSet.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- if .Values.server.statefulSet.labels}} + {{ toYaml .Values.server.statefulSet.labels | nindent 4 }} + {{- end}} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + serviceName: {{ template "prometheus.server.fullname" . }}-headless + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + podManagementPolicy: {{ .Values.server.statefulSet.podManagementPolicy }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: + {{ toYaml .Values.server.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} +{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} + {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} + enableServiceLinks: true + {{- else }} + enableServiceLinks: false + {{- end }} +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --volume-dir={{ . 
}} + {{- end }} + {{- if .Values.configmapReload.prometheus.containerPort }} + ports: + - containerPort: {{ .Values.configmapReload.prometheus.containerPort }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} + image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}" + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.defaultFlagsOverride }} + {{ toYaml .Values.server.defaultFlagsOverride | nindent 12}} + {{- else }} + {{- if .Values.server.prefixURL }} + - --web.route-prefix={{ .Values.server.prefixURL }} + {{- end }} + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + {{- if .Values.server.storagePath }} + - --storage.tsdb.path={{ .Values.server.storagePath }} + {{- else }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + {{- end }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . }} + {{- end }} + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + {{- end }} + ports: + - containerPort: 9090 + {{- if .Values.server.hostPort }} + hostPort: {{ .Values.server.hostPort }} + {{- end }} + readinessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} + successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} + livenessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} + successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} + resources: +{{ toYaml .Values.server.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: 
{{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- range $name, $spec := .Values.server.sidecarContainers }} + - name: {{ $name }} + {{- if kindIs "string" $spec }} + {{- tpl $spec $ | nindent 10 }} + {{- else }} + {{- toYaml $spec | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + hostNetwork: {{ .Values.server.hostNetwork }} + {{- if .Values.server.dnsPolicy }} + dnsPolicy: {{ .Values.server.dnsPolicy }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.hostAliases }} + hostAliases: +{{ toYaml .Values.server.hostAliases | indent 8 }} + {{- end }} + {{- if .Values.server.dnsConfig }} + dnsConfig: +{{ toYaml .Values.server.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.server.securityContext }} + securityContext: +{{ toYaml .Values.server.securityContext | indent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + {{- if empty .Values.server.configFromSecret }} + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.server.configFromSecret }} + {{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- with .optional }} + optional: {{ . 
}} + {{- end }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} +{{- if .Values.server.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 10 }} + {{- end }} + spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 10 }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" + {{- if .Values.server.persistentVolume.storageClass }} + {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: storage-volume + emptyDir: + {{- if .Values.server.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} +{{- end }} +{{- end }} +{{- end }} diff --git a/deploy/prod/charts/prometheus/templates/server/vpa.yaml b/deploy/prod/charts/prometheus/templates/server/vpa.yaml new file mode 100644 index 0000000..981a9b4 --- /dev/null +++ b/deploy/prod/charts/prometheus/templates/server/vpa.yaml @@ -0,0 +1,24 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.verticalAutoscaler.enabled -}} +apiVersion: autoscaling.k8s.io/v1beta2 +kind: VerticalPodAutoscaler +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }}-vpa +{{ include "prometheus.namespace" . | indent 2 }} +spec: + targetRef: + apiVersion: "apps/v1" +{{- if .Values.server.statefulSet.enabled }} + kind: StatefulSet +{{- else }} + kind: Deployment +{{- end }} + name: {{ template "prometheus.server.fullname" . }} + updatePolicy: + updateMode: {{ .Values.server.verticalAutoscaler.updateMode | default "Off" | quote }} + resourcePolicy: + containerPolicies: {{ .Values.server.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} +{{- end -}} {{/* if .Values.server.verticalAutoscaler.enabled */}} +{{- end -}} {{/* .Values.server.enabled */}} diff --git a/deploy/prod/charts/prometheus/values.yaml b/deploy/prod/charts/prometheus/values.yaml new file mode 100644 index 0000000..64324eb --- /dev/null +++ b/deploy/prod/charts/prometheus/values.yaml @@ -0,0 +1,1840 @@ +rbac: + create: true + +podSecurityPolicy: + enabled: false + +imagePullSecrets: +# - name: "image-pull-secret" + +## Define serviceAccount names for components. Defaults to component's fully qualified name. +## +serviceAccounts: + alertmanager: + create: true + name: + annotations: {} + nodeExporter: + create: true + name: + annotations: {} + pushgateway: + create: true + name: + annotations: {} + server: + create: true + name: + annotations: {} + +alertmanager: + ## If false, alertmanager will not be installed + ## + enabled: true + + ## Use a ClusterRole (and ClusterRoleBinding) + ## - If set to false - we define a Role and RoleBinding in the defined namespaces ONLY + ## This makes alertmanager work - for users who do not have ClusterAdmin privs, but wants alertmanager to operate on their own namespaces, instead of clusterwide. + useClusterRole: true + + ## Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. 
+ useExistingRole: false + + ## alertmanager container name + ## + name: alertmanager + + ## alertmanager container image + ## + image: + repository: quay.io/prometheus/alertmanager + tag: v0.23.0 + pullPolicy: IfNotPresent + + ## alertmanager priorityClassName + ## + priorityClassName: "" + + ## Custom HTTP headers for Readiness Probe + ## + ## Useful for providing HTTP Basic Auth to healthchecks + probeHeaders: [] + + ## Additional alertmanager container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access alertmanager + baseURL: "http://localhost:9093" + + ## Additional alertmanager container environment variable + ## For instance to add a http_proxy + ## + extraEnv: {} + + ## Additional alertmanager Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. + extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: alertmanager-secret-files + # readOnly: true + + ## Additional alertmanager Configmap mounts + extraConfigmapMounts: [] + # - name: template-files + # mountPath: /etc/config/templates.d + # configMap: alertmanager-template-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config + ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configFromSecret: "" + + ## The configuration file name to be loaded to alertmanager + ## Must match the key within configuration loaded from ConfigMap/Secret + ## + configFileName: alertmanager.yml + + ingress: + ## If true, alertmanager Ingress will be created + ## + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + ## alertmanager Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## alertmanager Ingress additional labels + ## + extraLabels: {} + + ## alertmanager Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - alertmanager.domain.com + # - domain.com/alertmanager + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## alertmanager Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - alertmanager.domain.com + + ## Alertmanager Deployment Strategy type + # strategy: + # type: Recreate + + ## Node tolerations for alertmanager scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for alertmanager pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, alertmanager will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## alertmanager data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## alertmanager data Persistent Volume Claim annotations + ## + annotations: {} + + ## alertmanager data Persistent Volume existing claim name + ## Requires alertmanager.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## alertmanager data Persistent Volume mount root path + ## + mountPath: /data + + ## alertmanager data Persistent Volume size + ## + size: 2Gi + + ## alertmanager data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## alertmanager data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. 
+ ## + # volumeBindingMode: "" + + ## Subdirectory of alertmanager data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Persistent Volume Claim Selector + ## Useful if Persistent Volumes have been provisioned in advance + ## Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + # selector: + # matchLabels: + # release: "stable" + # matchExpressions: + # - { key: environment, operator: In, values: [ dev ] } + + emptyDir: + ## alertmanager emptyDir volume size limit + ## + sizeLimit: "" + + ## Annotations to be added to alertmanager pods + ## + podAnnotations: {} + ## Tell prometheus to use a specific set of alertmanager pods + ## instead of all alertmanager pods found in the same namespace + ## Useful if you deploy multiple releases within the same namespace + ## + ## prometheus.io/probe: alertmanager-teamA + + ## Labels to be added to Prometheus AlertManager pods + ## + podLabels: {} + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. 
+ ## This allows to scale replicas to more than 1 pod + ## + enabled: false + + annotations: {} + labels: {} + podManagementPolicy: OrderedReady + + ## Alertmanager headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + + ## Enabling peer mesh service end points for enabling the HA alert manager + ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md + enableMeshPeer: false + + servicePort: 80 + + ## alertmanager resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + # Custom DNS configuration to be added to alertmanager pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to alertmanager pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Enabling peer mesh service end points for enabling the HA alert manager + ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md + # enableMeshPeer : true + + ## List of IP addresses at which the alertmanager service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + # nodePort: 30000 + sessionAffinity: None + type: ClusterIP + + ## List of initial peers + ## Ref: https://github.com/prometheus/alertmanager/blob/main/README.md#high-availability + clusterPeers: [] + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/jimmidyson/configmap-reload +## +configmapReload: + prometheus: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.5.0 + pullPolicy: IfNotPresent + + # containerPort: 9533 + + ## Additional configmap-reload container arguments + ## + extraArgs: {} + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + + + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true + + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + alertmanager: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.5.0 + pullPolicy: IfNotPresent + + # containerPort: 9533 + + ## Additional configmap-reload container arguments + ## + extraArgs: {} + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + + + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true + + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## 
+ resources: {} + +kubeStateMetrics: + ## If false, kube-state-metrics sub-chart will not be installed + ## + enabled: true + +## kube-state-metrics sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics +## +# kube-state-metrics: + +nodeExporter: + ## If false, node-exporter will not be installed + ## + enabled: true + + ## If true, node-exporter pods share the host network namespace + ## + hostNetwork: true + + ## If true, node-exporter pods share the host PID namespace + ## + hostPID: true + + ## If true, node-exporter pods mounts host / at /host/root + ## + hostRootfs: true + + ## node-exporter container name + ## + name: node-exporter + + ## node-exporter container image + ## + image: + repository: quay.io/prometheus/node-exporter + tag: v1.3.0 + pullPolicy: IfNotPresent + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## node-exporter priorityClassName + ## + priorityClassName: "" + + ## Custom Update Strategy + ## + updateStrategy: + type: RollingUpdate + + ## Additional node-exporter container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## Additional node-exporter hostPath mounts + ## + extraHostPathMounts: [] + # - name: textfile-dir + # mountPath: /srv/txt_collector + # hostPath: /var/lib/node-exporter + # readOnly: true + # mountPropagation: HostToContainer + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # configMap: certs-configmap + # readOnly: true + + ## Node tolerations for node-exporter scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for node-exporter pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to node-exporter pods + ## + podAnnotations: {} + + ## Labels to be added to node-exporter pods + ## + pod: + labels: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## node-exporter resource limits & requests + ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + container: + securityContext: + allowPrivilegeEscalation: false + # Custom DNS configuration to be added to node-exporter pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security 
context to be added to node-exporter pods + ## + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + + service: + annotations: + prometheus.io/scrape: "true" + labels: {} + + # Exposed as a headless service: + # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services + clusterIP: None + + ## List of IP addresses at which the node-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + hostPort: 9100 + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9100 + type: ClusterIP + +server: + ## Prometheus server container name + ## + enabled: true + + ## Use a ClusterRole (and ClusterRoleBinding) + ## - If set to false - we define a RoleBinding in the defined namespaces ONLY + ## + ## NB: because we need a Role with nonResourceURL's ("/metrics") - you must get someone with Cluster-admin privileges to define this role for you, before running with this setting enabled. + ## This makes prometheus work - for users who do not have ClusterAdmin privs, but wants prometheus to operate on their own namespaces, instead of clusterwide. + ## + ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus. + ## + # useExistingClusterRoleName: nameofclusterrole + + ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges. + # namespaces: + # - yournamespace + + name: server + + # sidecarContainers - add more containers to prometheus server + # Key/Value where Key is the sidecar `- name: ` + # Example: + # sidecarContainers: + # webserver: + # image: nginx + sidecarContainers: {} + + # sidecarTemplateValues - context to be used in template for sidecarContainers + # Example: + # sidecarTemplateValues: *your-custom-globals + # sidecarContainers: + # webserver: |- + # {{ include "webserver-container-template" . }} + # Template for `webserver-container-template` might looks like this: + # image: "{{ .Values.server.sidecarTemplateValues.repository }}:{{ .Values.server.sidecarTemplateValues.tag }}" + # ... + # + sidecarTemplateValues: {} + + ## Prometheus server container image + ## + image: + repository: quay.io/prometheus/prometheus + tag: v2.34.0 + pullPolicy: IfNotPresent + + ## prometheus server priorityClassName + ## + priorityClassName: "" + + ## EnableServiceLinks indicates whether information about services should be injected + ## into pod's environment variables, matching the syntax of Docker links. + ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0. + ## + enableServiceLinks: true + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access prometheus + ## Maybe same with Ingress host name + baseURL: "" + + ## Additional server container environment variables + ## + ## You specify this manually like you would a raw deployment manifest. + ## This means you can bind in environment variables from secrets. + ## + ## e.g. static environment variable: + ## - name: DEMO_GREETING + ## value: "Hello from the environment" + ## + ## e.g. 
secret environment variable: + ## - name: USERNAME + ## valueFrom: + ## secretKeyRef: + ## name: mysecret + ## key: username + env: [] + + # List of flags to override default parameters, e.g: + # - --enable-feature=agent + # - --storage.agent.retention.max-time=30m + defaultFlagsOverride: [] + + extraFlags: + - web.enable-lifecycle + ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as + ## deleting time series. This is disabled by default. + # - web.enable-admin-api + ## + ## storage.tsdb.no-lockfile flag controls BD locking + # - storage.tsdb.no-lockfile + ## + ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) + # - storage.tsdb.wal-compression + + ## Path to a configuration file on prometheus server container FS + configPath: /etc/config/prometheus.yml + + ### The data directory used by prometheus to set --storage.tsdb.path + ### When empty server.persistentVolume.mountPath is used instead + storagePath: "" + + global: + ## How frequently to scrape targets by default + ## + scrape_interval: 1m + ## How long until a scrape request times out + ## + scrape_timeout: 10s + ## How frequently to evaluate rules + ## + evaluation_interval: 1m + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write + ## + remoteWrite: [] + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read + ## + remoteRead: [] + + ## Custom HTTP headers for Liveness/Readiness/Startup Probe + ## + ## Useful for providing HTTP Basic Auth to healthchecks + probeHeaders: [] + + ## Additional Prometheus server container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## Additional Prometheus server Volume mounts + ## + extraVolumeMounts: [] + + ## Additional Prometheus server Volumes + ## + extraVolumes: [] + + ## Additional Prometheus server hostPath mounts + ## + extraHostPathMounts: [] + # - name: certs-dir + # mountPath: /etc/kubernetes/certs + # subPath: "" + # hostPath: /etc/kubernetes/certs + # readOnly: true + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # subPath: "" + # configMap: certs-configmap + # readOnly: true + + ## Additional Prometheus server Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
+ extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: prom-secret-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/server-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + ## Prometheus server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## Prometheus server Ingress additional labels + ## + extraLabels: {} + + ## Prometheus server Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - prometheus.domain.com + # - domain.com/prometheus + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-server-tls + # hosts: + # - prometheus.domain.com + + ## Server Deployment Strategy type + # strategy: + # type: Recreate + + ## hostAliases allows adding entries to /etc/hosts inside the containers + hostAliases: [] + # - ip: "127.0.0.1" + # hostnames: + # - "example.com" + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for Prometheus server pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, Prometheus server will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## Prometheus server data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## Prometheus server data Persistent Volume annotations + ## + annotations: {} + + ## Prometheus server data Persistent Volume existing claim name + ## Requires server.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## Prometheus server data Persistent Volume mount root path + ## + mountPath: /data + + ## Prometheus server data Persistent Volume size + ## + size: 8Gi + + ## Prometheus server data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Prometheus server data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. + ## + # volumeBindingMode: "" + + ## Subdirectory of Prometheus server data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Persistent Volume Claim Selector + ## Useful if Persistent Volumes have been provisioned in advance + ## Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + # selector: + # matchLabels: + # release: "stable" + # matchExpressions: + # - { key: environment, operator: In, values: [ dev ] } + + emptyDir: + ## Prometheus server emptyDir volume size limit + ## + sizeLimit: "" + + ## Annotations to be added to Prometheus server pods + ## + podAnnotations: {} + # iam.amazonaws.com/role: prometheus + + ## Labels to be added to Prometheus server pods + ## + podLabels: {} + + ## Prometheus AlertManager configuration + ## + alertmanagers: [] + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. 
+    ## This allows scaling replicas to more than 1 pod
+    ##
+    enabled: false
+
+    annotations: {}
+    labels: {}
+    podManagementPolicy: OrderedReady
+
+    ## Prometheus server headless service to use for the statefulset
+    ##
+    headless:
+      annotations: {}
+      labels: {}
+      servicePort: 80
+      ## Enable gRPC port on service to allow auto discovery with thanos-querier
+      gRPC:
+        enabled: false
+        servicePort: 10901
+        # nodePort: 10901
+
+  ## Prometheus server readiness and liveness probe initial delay and timeout
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
+  ##
+  tcpSocketProbeEnabled: false
+  probeScheme: HTTP
+  readinessProbeInitialDelay: 30
+  readinessProbePeriodSeconds: 5
+  readinessProbeTimeout: 4
+  readinessProbeFailureThreshold: 3
+  readinessProbeSuccessThreshold: 1
+  livenessProbeInitialDelay: 30
+  livenessProbePeriodSeconds: 15
+  livenessProbeTimeout: 10
+  livenessProbeFailureThreshold: 3
+  livenessProbeSuccessThreshold: 1
+  startupProbe:
+    enabled: false
+    periodSeconds: 5
+    failureThreshold: 30
+    timeoutSeconds: 10
+
+  ## Prometheus server resource requests and limits
+  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 500m
+    #   memory: 512Mi
+    # requests:
+    #   cpu: 500m
+    #   memory: 512Mi
+
+  # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
+  # because the control plane managed by AWS cannot communicate with the pods' IP CIDR, so admission webhooks do not work
+  ##
+  hostNetwork: false
+
+  # When hostNetwork is enabled, you probably want to set this to ClusterFirstWithHostNet
+  dnsPolicy: ClusterFirst
+
+  # Use hostPort
+  # hostPort: 9090
+
+  ## Vertical Pod Autoscaler config
+  ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
+  verticalAutoscaler:
+    ## If true a VPA object will be created for the controller (either StatefulSet or Deployment, based on above configs)
+    enabled: false
+    # updateMode: "Auto"
+    # containerPolicies:
+    # - containerName: 'prometheus-server'
+
+  # Custom DNS configuration to be added to prometheus server pods
+  dnsConfig: {}
+    # nameservers:
+    #   - 1.2.3.4
+    # searches:
+    #   - ns1.svc.cluster-domain.example
+    #   - my.dns.search.suffix
+    # options:
+    #   - name: ndots
+    #     value: "2"
+    #   - name: edns0
+  ## Security context to be added to server pods
+  ##
+  securityContext:
+    runAsUser: 65534
+    runAsNonRoot: true
+    runAsGroup: 65534
+    fsGroup: 65534
+
+  service:
+    ## If false, no Service will be created for the Prometheus server
+    ##
+    enabled: true
+
+    annotations: {}
+    labels: {}
+    clusterIP: ""
+
+    ## List of IP addresses at which the Prometheus server service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 80
+    sessionAffinity: None
+    type: ClusterIP
+
+    ## Enable gRPC port on service to allow auto discovery with thanos-querier
+    gRPC:
+      enabled: false
+      servicePort: 10901
+      # nodePort: 10901
+
+    ## If using a statefulSet (statefulSet.enabled=true), configure the
+    ## service to connect to a specific replica to have a consistent view
+    ## of the data.
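+    ## To do this, enable the toggle below and choose the 0-based replica ordinal,
+    ## for example (commented illustration only):
+    # statefulsetReplica:
+    #   enabled: true
+    #   replica: 0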
+ statefulsetReplica: + enabled: false + replica: 0 + + ## Prometheus server pod termination grace period + ## + terminationGracePeriodSeconds: 300 + + ## Prometheus data retention period (default if not specified is 15 days) + ## + retention: "15d" + +pushgateway: + ## If false, pushgateway will not be installed + ## + enabled: true + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## pushgateway container name + ## + name: pushgateway + + ## pushgateway container image + ## + image: + repository: prom/pushgateway + tag: v1.4.2 + pullPolicy: IfNotPresent + + ## pushgateway priorityClassName + ## + priorityClassName: "" + + ## Additional pushgateway container arguments + ## + ## for example: persistence.file: /data/pushgateway.data + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ingress: + ## If true, pushgateway Ingress will be created + ## + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + ## pushgateway Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## pushgateway Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - pushgateway.domain.com + # - domain.com/pushgateway + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## pushgateway Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - pushgateway.domain.com + + ## Node tolerations for pushgateway scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for pushgateway pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to pushgateway pods + ## + podAnnotations: {} + + ## Labels to be added to pushgateway pods + ## + podLabels: {} + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## pushgateway resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + ## Vertical Pod Autoscaler config + ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler + verticalAutoscaler: + ## If true a VPA object will be created for the controller + enabled: false + # updateMode: "Auto" + # containerPolicies: + # - containerName: 'prometheus-pushgateway' + + # Custom DNS configuration to be added to push-gateway pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to push-gateway pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + + service: + annotations: + prometheus.io/probe: pushgateway + labels: {} + clusterIP: "" + + ## List of IP addresses at which the pushgateway service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9091 + type: ClusterIP + + ## pushgateway Deployment Strategy type + # strategy: + # type: Recreate + + persistentVolume: + ## If true, pushgateway will create/use a Persistent Volume Claim + ## + enabled: false + + ## pushgateway data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## pushgateway data 
Persistent Volume Claim annotations + ## + annotations: {} + + ## pushgateway data Persistent Volume existing claim name + ## Requires pushgateway.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## pushgateway data Persistent Volume mount root path + ## + mountPath: /data + + ## pushgateway data Persistent Volume size + ## + size: 2Gi + + ## pushgateway data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## pushgateway data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. + ## + # volumeBindingMode: "" + + ## Subdirectory of pushgateway data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + +## alertmanager ConfigMap entries +## +alertmanagerFiles: + alertmanager.yml: + global: {} + # slack_api_url: '' + + receivers: + - name: default-receiver + # slack_configs: + # - channel: '@you' + # send_resolved: true + + route: + group_wait: 10s + group_interval: 5m + receiver: default-receiver + repeat_interval: 3h + +## Prometheus server ConfigMap entries for rule files (allow prometheus labels interpolation) +ruleFiles: {} + +## Prometheus server ConfigMap entries +## +serverFiles: + + ## Alerts configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + alerting_rules.yml: {} + # groups: + # - name: Instances + # rules: + # - alert: InstanceDown + # expr: up == 0 + # for: 5m + # labels: + # severity: page + # annotations: + # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' + # summary: 'Instance {{ $labels.instance }} down' + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml + alerts: {} + + ## Records configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ + recording_rules.yml: {} + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml + rules: {} + + prometheus.yml: + rule_files: + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml + ## Below two files are DEPRECATED will be removed from this default values file + - /etc/config/rules + - /etc/config/alerts + + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + + # Scrape config for API servers. + # + # Kubernetes exposes API servers as endpoints to the default/kubernetes + # service so this uses `endpoints` role and uses relabelling to only keep + # the endpoints associated with the default/kubernetes service using the + # default named port `https`. This works for single API server deployments as + # well as HA API server deployments. 
+      - job_name: 'kubernetes-apiservers'
+
+        kubernetes_sd_configs:
+          - role: endpoints
+
+        # Default to scraping over https. If required, just disable this or change to
+        # `http`.
+        scheme: https
+
+        # This TLS & bearer token file config is used to connect to the actual scrape
+        # endpoints for cluster components. This is separate to discovery auth
+        # configuration because discovery & scraping are two separate concerns in
+        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+        # the cluster. Otherwise, more config options have to be provided within the
+        # <kubernetes_sd_config>.
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+          # If your node certificates are self-signed or use a different CA to the
+          # master CA, then disable certificate verification below. Note that
+          # certificate verification is an integral part of a secure infrastructure
+          # so this should only be disabled in a controlled environment. You can
+          # disable certificate verification by uncommenting the line below.
+          #
+          insecure_skip_verify: true
+        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+        # Keep only the default/kubernetes service endpoints for the https port. This
+        # will add targets for each API server which Kubernetes adds an endpoint to
+        # the default/kubernetes service.
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+            action: keep
+            regex: default;kubernetes;https
+
+      - job_name: 'kubernetes-nodes'
+
+        # Default to scraping over https. If required, just disable this or change to
+        # `http`.
+        scheme: https
+
+        # This TLS & bearer token file config is used to connect to the actual scrape
+        # endpoints for cluster components. This is separate to discovery auth
+        # configuration because discovery & scraping are two separate concerns in
+        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+        # the cluster. Otherwise, more config options have to be provided within the
+        # <kubernetes_sd_config>.
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+          # If your node certificates are self-signed or use a different CA to the
+          # master CA, then disable certificate verification below. Note that
+          # certificate verification is an integral part of a secure infrastructure
+          # so this should only be disabled in a controlled environment. You can
+          # disable certificate verification by uncommenting the line below.
+          #
+          insecure_skip_verify: true
+        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+        kubernetes_sd_configs:
+          - role: node
+
+        relabel_configs:
+          - action: labelmap
+            regex: __meta_kubernetes_node_label_(.+)
+          - target_label: __address__
+            replacement: kubernetes.default.svc:443
+          - source_labels: [__meta_kubernetes_node_name]
+            regex: (.+)
+            target_label: __metrics_path__
+            replacement: /api/v1/nodes/$1/proxy/metrics
+
+
+      - job_name: 'kubernetes-nodes-cadvisor'
+
+        # Default to scraping over https. If required, just disable this or change to
+        # `http`.
+        scheme: https
+
+        # This TLS & bearer token file config is used to connect to the actual scrape
+        # endpoints for cluster components. This is separate to discovery auth
+        # configuration because discovery & scraping are two separate concerns in
+        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+        # the cluster. Otherwise, more config options have to be provided within the
+        # <kubernetes_sd_config>.
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + # This configuration will work only on kubelet 1.7.3+ + # As the scrape endpoints for cAdvisor have changed + # if you are using older version you need to change the replacement to + # replacement: /api/v1/nodes/$1:4194/proxy/metrics + # more info here https://github.com/coreos/prometheus-operator/issues/633 + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + + # Scrape config for service endpoints. + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape`: Only scrape services that have a value of + # `true`, except if `prometheus.io/scrape-slow` is set to `true` as well. + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + # * `prometheus.io/param_`: If the metrics endpoint uses parameters + # then you can set any parameter + - job_name: 'kubernetes-service-endpoints' + honor_labels: true + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: drop + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) 
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: service + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + # Scrape config for slow service endpoints; same as above, but with a larger + # timeout and a larger interval + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + # * `prometheus.io/param_`: If the metrics endpoint uses parameters + # then you can set any parameter + - job_name: 'kubernetes-service-endpoints-slow' + honor_labels: true + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: service + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + - job_name: 'prometheus-pushgateway' + honor_labels: true + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: pushgateway + + # Example scrape config for probing services via the Blackbox Exporter. 
+      #
+      # The relabeling allows the actual service scrape endpoint to be configured
+      # via the following annotations:
+      #
+      # * `prometheus.io/probe`: Only probe services that have a value of `true`
+      - job_name: 'kubernetes-services'
+        honor_labels: true
+
+        metrics_path: /probe
+        params:
+          module: [http_2xx]
+
+        kubernetes_sd_configs:
+          - role: service
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
+            action: keep
+            regex: true
+          - source_labels: [__address__]
+            target_label: __param_target
+          - target_label: __address__
+            replacement: blackbox
+          - source_labels: [__param_target]
+            target_label: instance
+          - action: labelmap
+            regex: __meta_kubernetes_service_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            target_label: namespace
+          - source_labels: [__meta_kubernetes_service_name]
+            target_label: service
+
+      # Example scrape config for pods
+      #
+      # The relabeling allows the actual pod scrape endpoint to be configured via the
+      # following annotations:
+      #
+      # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`,
+      #   except if `prometheus.io/scrape-slow` is set to `true` as well.
+      # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+      #   to set this to `https` & most likely set the `tls_config` of the scrape config.
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
+      - job_name: 'kubernetes-pods'
+        honor_labels: true
+
+        kubernetes_sd_configs:
+          - role: pod
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
+            action: keep
+            regex: true
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow]
+            action: drop
+            regex: true
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme]
+            action: replace
+            regex: (https?)
+            target_label: __scheme__
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
+            action: replace
+            target_label: __metrics_path__
+            regex: (.+)
+          - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+            action: replace
+            regex: ([^:]+)(?::\d+)?;(\d+)
+            replacement: $1:$2
+            target_label: __address__
+          - action: labelmap
+            regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+)
+            replacement: __param_$1
+          - action: labelmap
+            regex: __meta_kubernetes_pod_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            action: replace
+            target_label: namespace
+          - source_labels: [__meta_kubernetes_pod_name]
+            action: replace
+            target_label: pod
+          - source_labels: [__meta_kubernetes_pod_phase]
+            regex: Pending|Succeeded|Failed|Completed
+            action: drop
+
+      # Example scrape config for pods which should be scraped slower. A useful example
+      # would be stackdriver-exporter, which queries an API on every scrape of the pod
+      #
+      # The relabeling allows the actual pod scrape endpoint to be configured via the
+      # following annotations:
+      #
+      # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true`
+      # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+      #   to set this to `https` & most likely set the `tls_config` of the scrape config.
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
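+      #
+      # For example (illustrative values only), a pod opts in to this slow job by
+      # carrying annotations such as:
+      #
+      #   metadata:
+      #     annotations:
+      #       prometheus.io/scrape-slow: "true"
+      #       prometheus.io/port: "9102"
+      #       prometheus.io/path: "/metrics"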
+ - job_name: 'kubernetes-pods-slow' + honor_labels: true + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) + target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + +# adds additional scrape configs to prometheus.yml +# must be a string so you have to add a | after extraScrapeConfigs: +# example adds prometheus-blackbox-exporter scrape config +extraScrapeConfigs: + # - job_name: 'prometheus-blackbox-exporter' + # metrics_path: /probe + # params: + # module: [http_2xx] + # static_configs: + # - targets: + # - https://example.com + # relabel_configs: + # - source_labels: [__address__] + # target_label: __param_target + # - source_labels: [__param_target] + # target_label: instance + # - target_label: __address__ + # replacement: prometheus-blackbox-exporter:9115 + +# Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager +# useful in H/A prometheus with different external labels but the same alerts +alertRelabelConfigs: + # alert_relabel_configs: + # - source_labels: [dc] + # regex: (.+)\d+ + # target_label: dc + +networkPolicy: + ## Enable creation of NetworkPolicy resources. + ## + enabled: false + +# Force namespace of namespaced resources +forceNamespace: null
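+
+# Example (commented illustration only): as noted above, `extraScrapeConfigs` must be
+# given as a single string, so the extra jobs go under a literal block scalar (`|`).
+# The job name and target below are hypothetical placeholders.
+#
+# extraScrapeConfigs: |
+#   - job_name: 'example-static-target'
+#     static_configs:
+#       - targets:
+#           - example-service.example-namespace:8080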