Compare commits

...

5 Commits

Author        SHA1          Message                         Date
Rob Watson    6ace4a6415    prometheus: Update chart        2022-12-20 09:09:13 +01:00
Rob Watson    9eb4d87fcb    metrics-server: Update chart    2022-12-20 09:09:13 +01:00
Rob Watson    6027ee7cf9    ingress-nginx: Update chart     2022-12-20 09:09:13 +01:00
Rob Watson    8c69873046    external-dns: Update chart      2022-12-20 09:09:13 +01:00
Rob Watson    61052424c3    grafana: Update chart           2022-12-20 09:09:13 +01:00
82 changed files with 685 additions and 693 deletions
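The diffs below are Helm output rendered into the repository under inflated/<chart>/templates/ and consumed by the kustomization shown in the last file of this compare. As a minimal sketch of how such a refresh is typically produced: the chart repository URL, release name, namespace and values file below are assumptions for illustration, not taken from this compare; only the chart name and version (grafana 6.48.0) come from the diff.

    # Hypothetical regeneration step for one of the updated charts.
    # Repo URL, release name, namespace and values path are assumed, not from this compare.
    helm repo add grafana https://grafana.github.io/helm-charts
    helm repo update
    helm template grafana grafana/grafana \
      --version 6.48.0 \
      --namespace default \
      --values values/grafana.yaml \
      --output-dir inflated/

With --output-dir, helm template writes one file per chart template (inflated/grafana/templates/...), which is what makes the per-file diffs in this compare possible.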

View File

@ -3,10 +3,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: external-dns
name: external-dns-default
namespace: "default"
labels:
app.kubernetes.io/name: external-dns
helm.sh/chart: external-dns-6.3.0
helm.sh/chart: external-dns-6.12.2
app.kubernetes.io/instance: external-dns
app.kubernetes.io/managed-by: Helm
rules:

View File

@ -3,17 +3,18 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: external-dns
name: external-dns-default
namespace: "default"
labels:
app.kubernetes.io/name: external-dns
helm.sh/chart: external-dns-6.3.0
helm.sh/chart: external-dns-6.12.2
app.kubernetes.io/instance: external-dns
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns
name: external-dns-default
subjects:
- kind: ServiceAccount
name: external-dns
namespace: default
namespace: "default"

View File

@ -4,10 +4,10 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
namespace: default
namespace: "default"
labels:
app.kubernetes.io/name: external-dns
helm.sh/chart: external-dns-6.3.0
helm.sh/chart: external-dns-6.12.2
app.kubernetes.io/instance: external-dns
app.kubernetes.io/managed-by: Helm
spec:
@ -20,7 +20,7 @@ spec:
metadata:
labels:
app.kubernetes.io/name: external-dns
helm.sh/chart: external-dns-6.3.0
helm.sh/chart: external-dns-6.12.2
app.kubernetes.io/instance: external-dns
app.kubernetes.io/managed-by: Helm
annotations:
@ -39,8 +39,6 @@ spec:
matchLabels:
app.kubernetes.io/name: external-dns
app.kubernetes.io/instance: external-dns
namespaces:
- "default"
topologyKey: kubernetes.io/hostname
weight: 1
nodeAffinity:
@ -48,7 +46,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: "docker.io/bitnami/external-dns:0.11.1-debian-10-r1"
image: "docker.io/bitnami/external-dns:0.13.1-debian-11-r19"
imagePullPolicy: "IfNotPresent"
args:
# Generic arguments

View File

@ -4,13 +4,15 @@ apiVersion: v1
kind: Service
metadata:
name: external-dns
namespace: default
namespace: "default"
labels:
app.kubernetes.io/name: external-dns
helm.sh/chart: external-dns-6.3.0
helm.sh/chart: external-dns-6.12.2
app.kubernetes.io/instance: external-dns
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
sessionAffinity: None
ports:
- name: http
port: 7979
@ -19,4 +21,3 @@ spec:
selector:
app.kubernetes.io/name: external-dns
app.kubernetes.io/instance: external-dns
type: ClusterIP

View File

@ -4,10 +4,11 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns
namespace: default
namespace: "default"
labels:
app.kubernetes.io/name: external-dns
helm.sh/chart: external-dns-6.3.0
helm.sh/chart: external-dns-6.12.2
app.kubernetes.io/instance: external-dns
app.kubernetes.io/managed-by: Helm
annotations:
automountServiceAccountToken: true

View File

@ -4,10 +4,10 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
helm.sh/chart: grafana-6.29.2
helm.sh/chart: grafana-6.48.0
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: grafana
app.kubernetes.io/version: "8.5.0"
app.kubernetes.io/version: "9.3.1"
app.kubernetes.io/managed-by: Helm
name: grafana-clusterrole
rules: []

View File

@ -5,10 +5,10 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: grafana-clusterrolebinding
labels:
helm.sh/chart: grafana-6.29.2
helm.sh/chart: grafana-6.48.0
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: grafana
app.kubernetes.io/version: "8.5.0"
app.kubernetes.io/version: "9.3.1"
app.kubernetes.io/managed-by: Helm
subjects:
- kind: ServiceAccount

View File

@ -6,10 +6,10 @@ metadata:
name: grafana
namespace: default
labels:
helm.sh/chart: grafana-6.29.2
helm.sh/chart: grafana-6.48.0
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: grafana
app.kubernetes.io/version: "8.5.0"
app.kubernetes.io/version: "9.3.1"
app.kubernetes.io/managed-by: Helm
data:
grafana.ini: |
@ -24,7 +24,8 @@ data:
logs = /var/log/grafana
plugins = /var/lib/grafana/plugins
provisioning = /etc/grafana/provisioning
[server]
domain = ''
datasources.yaml: |
apiVersion: 1
datasources:
@ -53,33 +54,51 @@ data:
--max-time 60 \
-H "Accept: application/json" \
-H "Content-Type: application/json;charset=UTF-8" \
"https://grafana.com/api/dashboards/10046/revisions/1/download" | sed '/-- .* --/! s/"datasource":.*,/"datasource": "Prometheus",/g'\
"https://grafana.com/api/dashboards/13192/revisions/1/download" \
| sed '/-- .* --/! s/"datasource":.*,/"datasource": "Prometheus",/g' \
> "/var/lib/grafana/dashboards/default/gitea.json"
curl -skf \
--connect-timeout 60 \
--max-time 60 \
-H "Accept: application/json" \
-H "Content-Type: application/json;charset=UTF-8" \
"https://grafana.com/api/dashboards/12006/revisions/1/download" | sed '/-- .* --/! s/"datasource":.*,/"datasource": "Prometheus",/g'\
"https://grafana.com/api/dashboards/12006/revisions/1/download" \
| sed '/-- .* --/! s/"datasource":.*,/"datasource": "Prometheus",/g' \
> "/var/lib/grafana/dashboards/default/kubernetes-apiserver.json"
curl -skf \
--connect-timeout 60 \
--max-time 60 \
-H "Accept: application/json" \
-H "Content-Type: application/json;charset=UTF-8" \
"https://grafana.com/api/dashboards/9614/revisions/1/download" | sed '/-- .* --/! s/"datasource":.*,/"datasource": "Prometheus",/g'\
"https://grafana.com/api/dashboards/9614/revisions/1/download" \
| sed '/-- .* --/! s/"datasource":.*,/"datasource": "Prometheus",/g' \
> "/var/lib/grafana/dashboards/default/nginx-ingress.json"
curl -skf \
--connect-timeout 60 \
--max-time 60 \
-H "Accept: application/json" \
-H "Content-Type: application/json;charset=UTF-8" \
"https://grafana.com/api/dashboards/1860/revisions/26/download" | sed '/-- .* --/! s/"datasource":.*,/"datasource": "Prometheus",/g'\
"https://grafana.com/api/dashboards/1860/revisions/26/download" \
| sed '/-- .* --/! s/"datasource":.*,/"datasource": "Prometheus",/g' \
> "/var/lib/grafana/dashboards/default/node.json"
curl -skf \
--connect-timeout 60 \
--max-time 60 \
-H "Accept: application/json" \
-H "Content-Type: application/json;charset=UTF-8" \
"https://grafana.com/api/dashboards/9628/revisions/7/download" | sed '/-- .* --/! s/"datasource":.*,/"datasource": "Prometheus",/g'\
"https://grafana.com/api/dashboards/9628/revisions/7/download" \
| sed '/-- .* --/! s/"datasource":.*,/"datasource": "Prometheus",/g' \
> "/var/lib/grafana/dashboards/default/postgresql.json"
curl -skf \
--connect-timeout 60 \
--max-time 60 \
-H "Accept: application/json" \
-H "Content-Type: application/json;charset=UTF-8" \
"https://grafana.com/api/dashboards/10046/revisions/1/download" \
| sed '/-- .* --/! s/"datasource":.*,/"datasource": "Prometheus",/g' \
> "/var/lib/grafana/dashboards/default/synapse.json"

View File

@ -6,10 +6,10 @@ metadata:
name: grafana-dashboards-default
namespace: default
labels:
helm.sh/chart: grafana-6.29.2
helm.sh/chart: grafana-6.48.0
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: grafana
app.kubernetes.io/version: "8.5.0"
app.kubernetes.io/version: "9.3.1"
app.kubernetes.io/managed-by: Helm
dashboard-provider: default
data:

View File

@ -6,10 +6,10 @@ metadata:
name: grafana
namespace: default
labels:
helm.sh/chart: grafana-6.29.2
helm.sh/chart: grafana-6.48.0
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: grafana
app.kubernetes.io/version: "8.5.0"
app.kubernetes.io/version: "9.3.1"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
@ -26,8 +26,8 @@ spec:
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: grafana
annotations:
checksum/config: 6060638a52f08e279c64b37a63b59ec99aa0b12a2a25e594b3d7acd41512ea01
checksum/dashboards-json-config: 60bfce132b37398fa9329494762f049aebef4ba473dabdd67d4f15d6a86a578c
checksum/config: 60be875675b2ad83279ee347d5a150559b24646f43fbc740f51b97915a0b9c31
checksum/dashboards-json-config: 703b33634d715cefba0501f04654c5d6dc28aba46888183ea3420ccdae3c8ecf
checksum/sc-dashboard-provider-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
spec:
@ -39,12 +39,10 @@ spec:
runAsUser: 472
initContainers:
- name: download-dashboards
image: "curlimages/curl:7.73.0"
image: "curlimages/curl:7.85.0"
imagePullPolicy: IfNotPresent
command: ["/bin/sh"]
args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh -x /etc/grafana/download_dashboards.sh" ]
resources:
{}
env:
volumeMounts:
- name: config
@ -55,7 +53,7 @@ spec:
enableServiceLinks: true
containers:
- name: grafana
image: "grafana/grafana:8.5.0"
image: "grafana/grafana:9.3.1"
imagePullPolicy: IfNotPresent
volumeMounts:
- name: config
@ -70,9 +68,6 @@ spec:
mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml"
subPath: "dashboardproviders.yaml"
ports:
- name: service
containerPort: 80
protocol: TCP
- name: grafana
containerPort: 3000
protocol: TCP
@ -106,8 +101,6 @@ spec:
httpGet:
path: /api/health
port: 3000
resources:
{}
volumes:
- name: config
configMap:

View File

@ -1,51 +0,0 @@
---
# Source: grafana/templates/podsecuritypolicy.yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: grafana
labels:
helm.sh/chart: grafana-6.29.2
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: grafana
app.kubernetes.io/version: "8.5.0"
app.kubernetes.io/managed-by: Helm
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
# Default set from Docker, with DAC_OVERRIDE and CHOWN
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'csi'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
readOnlyRootFilesystem: false

View File

@ -6,13 +6,13 @@ metadata:
name: grafana
namespace: default
labels:
helm.sh/chart: grafana-6.29.2
helm.sh/chart: grafana-6.48.0
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: grafana
app.kubernetes.io/version: "8.5.0"
app.kubernetes.io/version: "9.3.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [grafana]
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [grafana]

View File

@ -6,10 +6,10 @@ metadata:
name: grafana
namespace: default
labels:
helm.sh/chart: grafana-6.29.2
helm.sh/chart: grafana-6.48.0
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: grafana
app.kubernetes.io/version: "8.5.0"
app.kubernetes.io/version: "9.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io

View File

@ -6,10 +6,10 @@ metadata:
name: grafana
namespace: default
labels:
helm.sh/chart: grafana-6.29.2
helm.sh/chart: grafana-6.48.0
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: grafana
app.kubernetes.io/version: "8.5.0"
app.kubernetes.io/version: "9.3.1"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
@ -18,7 +18,6 @@ spec:
port: 80
protocol: TCP
targetPort: 3000
selector:
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: grafana

View File

@ -4,10 +4,10 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: grafana-6.29.2
helm.sh/chart: grafana-6.48.0
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: grafana
app.kubernetes.io/version: "8.5.0"
app.kubernetes.io/version: "9.3.1"
app.kubernetes.io/managed-by: Helm
name: grafana
namespace: default

View File

@ -8,10 +8,10 @@ metadata:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook

View File

@ -8,10 +8,10 @@ metadata:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook

View File

@ -9,10 +9,10 @@ metadata:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
@ -21,17 +21,17 @@ spec:
metadata:
name: ingress-nginx-admission-create
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: create
image: "k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660"
image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f"
imagePullPolicy: IfNotPresent
args:
- create
@ -43,13 +43,13 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
securityContext:
allowPrivilegeEscalation: false
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 2000
fsGroup: 2000

View File

@ -9,10 +9,10 @@ metadata:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
@ -21,17 +21,17 @@ spec:
metadata:
name: ingress-nginx-admission-patch
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: patch
image: "k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660"
image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f"
imagePullPolicy: IfNotPresent
args:
- patch
@ -45,13 +45,13 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
securityContext:
allowPrivilegeEscalation: false
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 2000
fsGroup: 2000

View File

@ -9,10 +9,10 @@ metadata:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook

View File

@ -9,10 +9,10 @@ metadata:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook

View File

@ -9,10 +9,10 @@ metadata:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook

View File

@ -6,10 +6,10 @@ apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook

View File

@ -4,10 +4,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
@ -24,6 +24,13 @@ rules:
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
@ -67,3 +74,11 @@ rules:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get

View File

@ -4,10 +4,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx

View File

@ -4,10 +4,10 @@ apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller

View File

@ -4,10 +4,10 @@ apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@ -32,7 +32,7 @@ spec:
dnsPolicy: ClusterFirst
containers:
- name: controller
image: "k8s.gcr.io/ingress-nginx/controller:v1.2.0@sha256:d8196e3bc1e72547c5dec66d6556c0ff92a23f6d0919b206be170bc90d5f9185"
image: "registry.k8s.io/ingress-nginx/controller:v1.5.1@sha256:4ba73c697770664c1e00e9f968de14e08f606ff961c76e5d7033a4a9c593c629"
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@ -42,7 +42,7 @@ spec:
args:
- /nginx-ingress-controller
- --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
- --election-id=ingress-controller-leader
- --election-id=ingress-nginx-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller

View File

@ -6,10 +6,10 @@ apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller

View File

@ -4,10 +4,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@ -61,12 +61,17 @@ rules:
- get
- list
- watch
# TODO(Jintao Zhang)
# Once we release a new version of the controller,
# we will be able to remove the configmap related permissions
# We have used the Lease API for selection
# ref: https://github.com/kubernetes/ingress-nginx/pull/8921
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- ingress-controller-leader
- ingress-nginx-leader
verbs:
- get
- update
@ -76,6 +81,21 @@ rules:
- configmaps
verbs:
- create
- apiGroups:
- coordination.k8s.io
resources:
- leases
resourceNames:
- ingress-nginx-leader
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
@ -83,3 +103,11 @@ rules:
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get

View File

@ -4,10 +4,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller

View File

@ -7,10 +7,10 @@ metadata:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller

View File

@ -4,10 +4,10 @@ apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller

View File

@ -5,10 +5,10 @@ kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller

View File

@ -4,10 +4,10 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.0
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller

View File

@ -5,10 +5,10 @@ kind: APIService
metadata:
name: v1beta1.metrics.k8s.io
labels:
helm.sh/chart: metrics-server-3.8.2
helm.sh/chart: metrics-server-3.8.3
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.6.1"
app.kubernetes.io/version: "0.6.2"
app.kubernetes.io/managed-by: Helm
spec:
group: metrics.k8s.io
@ -17,5 +17,6 @@ spec:
service:
name: metrics-server
namespace: default
port: 443
version: v1beta1
versionPriority: 100

View File

@ -5,10 +5,10 @@ kind: ClusterRole
metadata:
name: system:metrics-server-aggregated-reader
labels:
helm.sh/chart: metrics-server-3.8.2
helm.sh/chart: metrics-server-3.8.3
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.6.1"
app.kubernetes.io/version: "0.6.2"
app.kubernetes.io/managed-by: Helm
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"

View File

@ -5,10 +5,10 @@ kind: ClusterRole
metadata:
name: system:metrics-server
labels:
helm.sh/chart: metrics-server-3.8.2
helm.sh/chart: metrics-server-3.8.3
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.6.1"
app.kubernetes.io/version: "0.6.2"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:

View File

@ -5,10 +5,10 @@ kind: ClusterRoleBinding
metadata:
name: metrics-server:system:auth-delegator
labels:
helm.sh/chart: metrics-server-3.8.2
helm.sh/chart: metrics-server-3.8.3
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.6.1"
app.kubernetes.io/version: "0.6.2"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io

View File

@ -5,10 +5,10 @@ kind: ClusterRoleBinding
metadata:
name: system:metrics-server
labels:
helm.sh/chart: metrics-server-3.8.2
helm.sh/chart: metrics-server-3.8.3
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.6.1"
app.kubernetes.io/version: "0.6.2"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io

View File

@ -4,11 +4,12 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: metrics-server
namespace: default
labels:
helm.sh/chart: metrics-server-3.8.2
helm.sh/chart: metrics-server-3.8.3
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.6.1"
app.kubernetes.io/version: "0.6.2"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
@ -22,6 +23,7 @@ spec:
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
spec:
schedulerName:
serviceAccountName: metrics-server
priorityClassName: "system-cluster-critical"
containers:
@ -31,7 +33,7 @@ spec:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
image: k8s.gcr.io/metrics-server/metrics-server:v0.6.1
image: k8s.gcr.io/metrics-server/metrics-server:v0.6.2
imagePullPolicy: IfNotPresent
args:
- --secure-port=4443

View File

@ -4,12 +4,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: metrics-server-auth-reader
namespace: kube-system
namespace: default
labels:
helm.sh/chart: metrics-server-3.8.2
helm.sh/chart: metrics-server-3.8.3
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.6.1"
app.kubernetes.io/version: "0.6.2"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io

View File

@ -4,11 +4,12 @@ apiVersion: v1
kind: Service
metadata:
name: metrics-server
namespace: default
labels:
helm.sh/chart: metrics-server-3.8.2
helm.sh/chart: metrics-server-3.8.3
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.6.1"
app.kubernetes.io/version: "0.6.2"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP

View File

@ -4,9 +4,10 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: metrics-server
namespace: default
labels:
helm.sh/chart: metrics-server-3.8.2
helm.sh/chart: metrics-server-3.8.3
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.6.1"
app.kubernetes.io/version: "0.6.2"
app.kubernetes.io/managed-by: Helm

View File

@ -0,0 +1,24 @@
---
# Source: prometheus/charts/alertmanager/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: prometheus-alertmanager
labels:
helm.sh/chart: alertmanager-0.22.2
app.kubernetes.io/name: alertmanager
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v0.24.0"
app.kubernetes.io/managed-by: Helm
data:
alertmanager.yml: |
global: {}
receivers:
- name: default-receiver
route:
group_interval: 5m
group_wait: 10s
receiver: default-receiver
repeat_interval: 3h
templates:
- /etc/alertmanager/*.tmpl

View File

@ -0,0 +1,12 @@
---
# Source: prometheus/charts/alertmanager/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus-alertmanager
labels:
helm.sh/chart: alertmanager-0.22.2
app.kubernetes.io/name: alertmanager
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v0.24.0"
app.kubernetes.io/managed-by: Helm

View File

@ -0,0 +1,44 @@
---
# Source: prometheus/charts/alertmanager/templates/services.yaml
apiVersion: v1
kind: Service
metadata:
name: prometheus-alertmanager
labels:
helm.sh/chart: alertmanager-0.22.2
app.kubernetes.io/name: alertmanager
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v0.24.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 9093
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: alertmanager
app.kubernetes.io/instance: prometheus
---
# Source: prometheus/charts/alertmanager/templates/services.yaml
apiVersion: v1
kind: Service
metadata:
name: prometheus-alertmanager-headless
labels:
helm.sh/chart: alertmanager-0.22.2
app.kubernetes.io/name: alertmanager
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v0.24.0"
app.kubernetes.io/managed-by: Helm
spec:
clusterIP: None
ports:
- port: 9093
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: alertmanager
app.kubernetes.io/instance: prometheus

View File

@ -0,0 +1,82 @@
---
# Source: prometheus/charts/alertmanager/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: prometheus-alertmanager
labels:
helm.sh/chart: alertmanager-0.22.2
app.kubernetes.io/name: alertmanager
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v0.24.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: alertmanager
app.kubernetes.io/instance: prometheus
serviceName: prometheus-alertmanager-headless
template:
metadata:
labels:
app.kubernetes.io/name: alertmanager
app.kubernetes.io/instance: prometheus
annotations:
checksum/config: d22f3c67cff89938da2f783e00040109619b87e969186bf3b190d32dc540202c
spec:
serviceAccountName: prometheus-alertmanager
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
containers:
- name: alertmanager
securityContext:
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
image: "quay.io/prometheus/alertmanager:v0.24.0"
imagePullPolicy: IfNotPresent
env:
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
args:
- --storage.path=/alertmanager
- --config.file=/etc/alertmanager/alertmanager.yml
ports:
- name: http
containerPort: 9093
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
{}
volumeMounts:
- name: config
mountPath: /etc/alertmanager
- name: storage
mountPath: /alertmanager
volumes:
- name: config
configMap:
name: prometheus-alertmanager
volumeClaimTemplates:
- metadata:
name: storage
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi

View File

@ -0,0 +1,21 @@
---
# Source: prometheus/charts/alertmanager/templates/tests/test-connection.yaml
apiVersion: v1
kind: Pod
metadata:
name: "prometheus-alertmanager-test-connection"
labels:
helm.sh/chart: alertmanager-0.22.2
app.kubernetes.io/name: alertmanager
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v0.24.0"
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['prometheus-alertmanager:9093']
restartPolicy: Never

View File

@ -4,13 +4,13 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: kube-state-metrics-4.7.0
helm.sh/chart: kube-state-metrics-4.24.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: kube-state-metrics
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "2.4.1"
app.kubernetes.io/version: "2.7.0"
name: prometheus-kube-state-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io

View File

@ -6,13 +6,13 @@ metadata:
name: prometheus-kube-state-metrics
namespace: default
labels:
helm.sh/chart: kube-state-metrics-4.7.0
helm.sh/chart: kube-state-metrics-4.24.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: kube-state-metrics
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "2.4.1"
app.kubernetes.io/version: "2.7.0"
spec:
selector:
matchLabels:
@ -22,13 +22,13 @@ spec:
template:
metadata:
labels:
helm.sh/chart: kube-state-metrics-4.7.0
helm.sh/chart: kube-state-metrics-4.24.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: kube-state-metrics
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "2.4.1"
app.kubernetes.io/version: "2.7.0"
spec:
hostNetwork: false
serviceAccountName: prometheus-kube-state-metrics
@ -40,10 +40,9 @@ spec:
- name: kube-state-metrics
args:
- --port=8080
- --resources=certificatesigningrequests,configmaps,cronjobs,daemonsets,deployments,endpoints,horizontalpodautoscalers,ingresses,jobs,limitranges,mutatingwebhookconfigurations,namespaces,networkpolicies,nodes,persistentvolumeclaims,persistentvolumes,poddisruptionbudgets,pods,replicasets,replicationcontrollers,resourcequotas,secrets,services,statefulsets,storageclasses,validatingwebhookconfigurations,volumeattachments
- --telemetry-port=8081
- --resources=certificatesigningrequests,configmaps,cronjobs,daemonsets,deployments,endpoints,horizontalpodautoscalers,ingresses,jobs,leases,limitranges,mutatingwebhookconfigurations,namespaces,networkpolicies,nodes,persistentvolumeclaims,persistentvolumes,poddisruptionbudgets,pods,replicasets,replicationcontrollers,resourcequotas,secrets,services,statefulsets,storageclasses,validatingwebhookconfigurations,volumeattachments
imagePullPolicy: IfNotPresent
image: "k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1"
image: "registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.7.0"
ports:
- containerPort: 8080
name: "http"

View File

@ -4,13 +4,13 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: kube-state-metrics-4.7.0
helm.sh/chart: kube-state-metrics-4.24.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: kube-state-metrics
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "2.4.1"
app.kubernetes.io/version: "2.7.0"
name: prometheus-kube-state-metrics
rules:
@ -59,6 +59,11 @@ rules:
- jobs
verbs: ["list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources:
- leases
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- limitranges

View File

@ -6,13 +6,13 @@ metadata:
name: prometheus-kube-state-metrics
namespace: default
labels:
helm.sh/chart: kube-state-metrics-4.7.0
helm.sh/chart: kube-state-metrics-4.24.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: kube-state-metrics
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "2.4.1"
app.kubernetes.io/version: "2.7.0"
annotations:
prometheus.io/scrape: 'true'
spec:

View File

@ -4,13 +4,13 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: kube-state-metrics-4.7.0
helm.sh/chart: kube-state-metrics-4.24.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: kube-state-metrics
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "2.4.1"
app.kubernetes.io/version: "2.7.0"
name: prometheus-kube-state-metrics
namespace: default
imagePullSecrets:

View File

@ -0,0 +1,110 @@
---
# Source: prometheus/charts/prometheus-node-exporter/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: prometheus-prometheus-node-exporter
namespace: default
labels:
helm.sh/chart: prometheus-node-exporter-4.8.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: prometheus-node-exporter
app.kubernetes.io/name: prometheus-node-exporter
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "1.5.0"
spec:
selector:
matchLabels:
app.kubernetes.io/name: prometheus-node-exporter
app.kubernetes.io/instance: prometheus
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
labels:
helm.sh/chart: prometheus-node-exporter-4.8.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: prometheus-node-exporter
app.kubernetes.io/name: prometheus-node-exporter
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "1.5.0"
spec:
automountServiceAccountToken: false
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: prometheus-prometheus-node-exporter
containers:
- name: node-exporter
image: quay.io/prometheus/node-exporter:v1.5.0
imagePullPolicy: IfNotPresent
args:
- --path.procfs=/host/proc
- --path.sysfs=/host/sys
- --path.rootfs=/host/root
- --web.listen-address=[$(HOST_IP)]:9100
securityContext:
allowPrivilegeEscalation: false
env:
- name: HOST_IP
value: 0.0.0.0
ports:
- name: metrics
containerPort: 9100
protocol: TCP
livenessProbe:
failureThreshold: 3
httpGet:
httpHeaders:
path: /
port: 9100
scheme: HTTP
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
httpHeaders:
path: /
port: 9100
scheme: HTTP
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
volumeMounts:
- name: proc
mountPath: /host/proc
readOnly: true
- name: sys
mountPath: /host/sys
readOnly: true
- name: root
mountPath: /host/root
mountPropagation: HostToContainer
readOnly: true
hostNetwork: true
hostPID: true
tolerations:
- effect: NoSchedule
operator: Exists
volumes:
- name: proc
hostPath:
path: /proc
- name: sys
hostPath:
path: /sys
- name: root
hostPath:
path: /

View File

@ -0,0 +1,27 @@
---
# Source: prometheus/charts/prometheus-node-exporter/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: prometheus-prometheus-node-exporter
namespace: default
labels:
helm.sh/chart: prometheus-node-exporter-4.8.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: prometheus-node-exporter
app.kubernetes.io/name: prometheus-node-exporter
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "1.5.0"
annotations:
prometheus.io/scrape: "true"
spec:
type: ClusterIP
ports:
- port: 9100
targetPort: 9100
protocol: TCP
name: metrics
selector:
app.kubernetes.io/name: prometheus-node-exporter
app.kubernetes.io/instance: prometheus

View File

@ -0,0 +1,15 @@
---
# Source: prometheus/charts/prometheus-node-exporter/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus-prometheus-node-exporter
namespace: default
labels:
helm.sh/chart: prometheus-node-exporter-4.8.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: prometheus-node-exporter
app.kubernetes.io/name: prometheus-node-exporter
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "1.5.0"

View File

@ -0,0 +1,61 @@
---
# Source: prometheus/charts/prometheus-pushgateway/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: prometheus-pushgateway-2.0.2
app.kubernetes.io/name: prometheus-pushgateway
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v1.5.1"
app.kubernetes.io/managed-by: Helm
name: prometheus-prometheus-pushgateway
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: prometheus-pushgateway
app.kubernetes.io/instance: prometheus
template:
metadata:
labels:
helm.sh/chart: prometheus-pushgateway-2.0.2
app.kubernetes.io/name: prometheus-pushgateway
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v1.5.1"
app.kubernetes.io/managed-by: Helm
spec:
serviceAccountName: prometheus-prometheus-pushgateway
containers:
- name: pushgateway
image: "prom/pushgateway:v1.5.1"
imagePullPolicy: IfNotPresent
ports:
- name: metrics
containerPort: 9091
protocol: TCP
livenessProbe:
httpGet:
path: /-/ready
port: 9091
initialDelaySeconds: 10
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /-/ready
port: 9091
initialDelaySeconds: 10
timeoutSeconds: 10
volumeMounts:
- name: storage-volume
mountPath: "/data"
subPath: ""
securityContext:
fsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
volumes:
- name: storage-volume
emptyDir: {}

View File

@ -0,0 +1,24 @@
---
# Source: prometheus/charts/prometheus-pushgateway/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/probe: pushgateway
labels:
helm.sh/chart: prometheus-pushgateway-2.0.2
app.kubernetes.io/name: prometheus-pushgateway
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v1.5.1"
app.kubernetes.io/managed-by: Helm
name: prometheus-prometheus-pushgateway
spec:
type: ClusterIP
ports:
- port: 9091
targetPort: 9091
protocol: TCP
name: http
selector:
app.kubernetes.io/name: prometheus-pushgateway
app.kubernetes.io/instance: prometheus

View File

@ -0,0 +1,12 @@
---
# Source: prometheus/charts/prometheus-pushgateway/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: prometheus-pushgateway-2.0.2
app.kubernetes.io/name: prometheus-pushgateway
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v1.5.1"
app.kubernetes.io/managed-by: Helm
name: prometheus-prometheus-pushgateway

View File

@ -1,14 +0,0 @@
---
# Source: prometheus/templates/alertmanager/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
component: "alertmanager"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-alertmanager
rules:
[]

View File

@ -1,20 +0,0 @@
---
# Source: prometheus/templates/alertmanager/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
component: "alertmanager"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-alertmanager
subjects:
- kind: ServiceAccount
name: prometheus-alertmanager
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus-alertmanager

View File

@ -1,23 +0,0 @@
---
# Source: prometheus/templates/alertmanager/cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
component: "alertmanager"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-alertmanager
namespace: default
data:
alertmanager.yml: |
global: {}
receivers:
- name: default-receiver
route:
group_interval: 5m
group_wait: 10s
receiver: default-receiver
repeat_interval: 3h

View File

@ -1,86 +0,0 @@
---
# Source: prometheus/templates/alertmanager/deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
component: "alertmanager"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-alertmanager
namespace: default
spec:
selector:
matchLabels:
component: "alertmanager"
app: prometheus
release: prometheus
replicas: 1
template:
metadata:
labels:
component: "alertmanager"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
spec:
serviceAccountName: prometheus-alertmanager
containers:
- name: prometheus-alertmanager
image: "quay.io/prometheus/alertmanager:v0.23.0"
imagePullPolicy: "IfNotPresent"
env:
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
args:
- --config.file=/etc/config/alertmanager.yml
- --storage.path=/data
- --cluster.listen-address=
- --web.external-url=http://localhost:9093
ports:
- containerPort: 9093
readinessProbe:
httpGet:
path: /-/ready
port: 9093
initialDelaySeconds: 30
timeoutSeconds: 30
resources:
{}
volumeMounts:
- name: config-volume
mountPath: /etc/config
- name: storage-volume
mountPath: "/data"
subPath: ""
- name: prometheus-alertmanager-configmap-reload
image: "jimmidyson/configmap-reload:v0.5.0"
imagePullPolicy: "IfNotPresent"
args:
- --volume-dir=/etc/config
- --webhook-url=http://127.0.0.1:9093/-/reload
resources:
{}
volumeMounts:
- name: config-volume
mountPath: /etc/config
readOnly: true
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
volumes:
- name: config-volume
configMap:
name: prometheus-alertmanager
- name: storage-volume
persistentVolumeClaim:
claimName: prometheus-alertmanager

View File

@ -1,19 +0,0 @@
---
# Source: prometheus/templates/alertmanager/pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
component: "alertmanager"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-alertmanager
namespace: default
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "2Gi"

View File

@ -1,25 +0,0 @@
---
# Source: prometheus/templates/alertmanager/service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
component: "alertmanager"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-alertmanager
namespace: default
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 9093
selector:
component: "alertmanager"
app: prometheus
release: prometheus
sessionAffinity: None
type: "ClusterIP"

View File

@ -1,15 +0,0 @@
---
# Source: prometheus/templates/alertmanager/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
component: "alertmanager"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-alertmanager
namespace: default
annotations:
{}

View File

@ -1,5 +1,5 @@
---
# Source: prometheus/templates/server/clusterrole.yaml
# Source: prometheus/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
@ -7,7 +7,7 @@ metadata:
component: "server"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
chart: prometheus-19.0.2
heritage: Helm
name: prometheus-server
rules:

View File

@ -1,5 +1,5 @@
---
# Source: prometheus/templates/server/clusterrolebinding.yaml
# Source: prometheus/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@ -7,7 +7,7 @@ metadata:
component: "server"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
chart: prometheus-19.0.2
heritage: Helm
name: prometheus-server
subjects:

View File

@ -1,5 +1,5 @@
---
# Source: prometheus/templates/server/cm.yaml
# Source: prometheus/templates/cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
@ -7,11 +7,12 @@ metadata:
component: "server"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
chart: prometheus-19.0.2
heritage: Helm
name: prometheus-server
namespace: default
data:
allow-snippet-annotations: "false"
alerting_rules.yml: |
{}
alerts: |
@ -106,7 +107,7 @@ data:
- __meta_kubernetes_service_annotation_prometheus_io_path
target_label: __metrics_path__
- action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
regex: (.+?)(?::\d+)?;(\d+)
replacement: $1:$2
source_labels:
- __address__
@ -149,7 +150,7 @@ data:
- __meta_kubernetes_service_annotation_prometheus_io_path
target_label: __metrics_path__
- action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
regex: (.+?)(?::\d+)?;(\d+)
replacement: $1:$2
source_labels:
- __address__
@ -236,7 +237,7 @@ data:
- __meta_kubernetes_pod_annotation_prometheus_io_path
target_label: __metrics_path__
- action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
regex: (.+?)(?::\d+)?;(\d+)
replacement: $1:$2
source_labels:
- __address__
@ -279,7 +280,7 @@ data:
- __meta_kubernetes_pod_annotation_prometheus_io_path
target_label: __metrics_path__
- action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
regex: (.+?)(?::\d+)?;(\d+)
replacement: $1:$2
source_labels:
- __address__
@ -315,15 +316,12 @@ data:
- source_labels: [__meta_kubernetes_namespace]
regex: default
action: keep
- source_labels: [__meta_kubernetes_pod_label_app]
- source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
regex: prometheus
action: keep
- source_labels: [__meta_kubernetes_pod_label_component]
- source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]
regex: alertmanager
action: keep
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_probe]
regex: .*
action: keep
- source_labels: [__meta_kubernetes_pod_container_port_number]
regex: "9093"
action: keep

View File

@ -1,5 +1,5 @@
---
# Source: prometheus/templates/server/deploy.yaml
# Source: prometheus/templates/deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
@ -7,7 +7,7 @@ metadata:
component: "server"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
chart: prometheus-19.0.2
heritage: Helm
name: prometheus-server
namespace: default
@ -18,20 +18,23 @@ spec:
app: prometheus
release: prometheus
replicas: 1
strategy:
type: Recreate
rollingUpdate: null
template:
metadata:
labels:
component: "server"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
chart: prometheus-19.0.2
heritage: Helm
spec:
enableServiceLinks: true
serviceAccountName: prometheus-server
containers:
- name: prometheus-server-configmap-reload
image: "jimmidyson/configmap-reload:v0.5.0"
image: "jimmidyson/configmap-reload:v0.8.0"
imagePullPolicy: "IfNotPresent"
args:
- --volume-dir=/etc/config
@ -44,7 +47,7 @@ spec:
readOnly: true
- name: prometheus-server
image: "quay.io/prometheus/prometheus:v2.34.0"
image: "quay.io/prometheus/prometheus:v2.40.5"
imagePullPolicy: "IfNotPresent"
args:
- --storage.tsdb.retention.time=15d
@ -83,7 +86,6 @@ spec:
- name: storage-volume
mountPath: /data
subPath: ""
hostNetwork: false
dnsPolicy: ClusterFirst
securityContext:
fsGroup: 65534

View File

@ -1,76 +0,0 @@
---
# Source: prometheus/templates/node-exporter/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
component: "node-exporter"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-node-exporter
namespace: default
spec:
selector:
matchLabels:
component: "node-exporter"
app: prometheus
release: prometheus
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
component: "node-exporter"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
spec:
serviceAccountName: prometheus-node-exporter
containers:
- name: prometheus-node-exporter
image: "quay.io/prometheus/node-exporter:v1.3.0"
imagePullPolicy: "IfNotPresent"
args:
- --path.procfs=/host/proc
- --path.sysfs=/host/sys
- --path.rootfs=/host/root
- --web.listen-address=:9100
ports:
- name: metrics
containerPort: 9100
hostPort: 9100
resources:
{}
securityContext:
allowPrivilegeEscalation: false
volumeMounts:
- name: proc
mountPath: /host/proc
readOnly: true
- name: sys
mountPath: /host/sys
readOnly: true
- name: root
mountPath: /host/root
mountPropagation: HostToContainer
readOnly: true
hostNetwork: true
hostPID: true
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
volumes:
- name: proc
hostPath:
path: /proc
- name: sys
hostPath:
path: /sys
- name: root
hostPath:
path: /

View File

@ -1,15 +0,0 @@
---
# Source: prometheus/templates/node-exporter/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
component: "node-exporter"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-node-exporter
namespace: default
annotations:
{}

View File

@ -1,27 +0,0 @@
---
# Source: prometheus/templates/node-exporter/svc.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
component: "node-exporter"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-node-exporter
namespace: default
spec:
clusterIP: None
ports:
- name: metrics
port: 9100
protocol: TCP
targetPort: 9100
selector:
component: "node-exporter"
app: prometheus
release: prometheus
type: "ClusterIP"

View File

@ -1,14 +0,0 @@
---
# Source: prometheus/templates/pushgateway/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
component: "pushgateway"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-pushgateway
rules:
[]

View File

@ -1,20 +0,0 @@
---
# Source: prometheus/templates/pushgateway/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
component: "pushgateway"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-pushgateway
subjects:
- kind: ServiceAccount
name: prometheus-pushgateway
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus-pushgateway

View File

@ -1,54 +0,0 @@
---
# Source: prometheus/templates/pushgateway/deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
component: "pushgateway"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-pushgateway
namespace: default
spec:
selector:
matchLabels:
component: "pushgateway"
app: prometheus
release: prometheus
replicas: 1
template:
metadata:
labels:
component: "pushgateway"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
spec:
serviceAccountName: prometheus-pushgateway
containers:
- name: prometheus-pushgateway
image: "prom/pushgateway:v1.4.2"
imagePullPolicy: "IfNotPresent"
args:
ports:
- containerPort: 9091
livenessProbe:
httpGet:
path: /-/healthy
port: 9091
initialDelaySeconds: 10
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /-/ready
port: 9091
initialDelaySeconds: 10
timeoutSeconds: 10
resources:
{}
securityContext:
runAsNonRoot: true
runAsUser: 65534

View File

@ -1,26 +0,0 @@
---
# Source: prometheus/templates/pushgateway/service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/probe: pushgateway
labels:
component: "pushgateway"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-pushgateway
namespace: default
spec:
ports:
- name: http
port: 9091
protocol: TCP
targetPort: 9091
selector:
component: "pushgateway"
app: prometheus
release: prometheus
type: "ClusterIP"

View File

@ -1,15 +0,0 @@
---
# Source: prometheus/templates/pushgateway/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
component: "pushgateway"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
heritage: Helm
name: prometheus-pushgateway
namespace: default
annotations:
{}

View File

@ -1,5 +1,5 @@
---
# Source: prometheus/templates/server/pvc.yaml
# Source: prometheus/templates/pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
@ -7,7 +7,7 @@ metadata:
component: "server"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
chart: prometheus-19.0.2
heritage: Helm
name: prometheus-server
namespace: default

View File

@ -1,5 +1,5 @@
---
# Source: prometheus/templates/server/service.yaml
# Source: prometheus/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
@ -7,7 +7,7 @@ metadata:
component: "server"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
chart: prometheus-19.0.2
heritage: Helm
name: prometheus-server
namespace: default

View File

@ -1,5 +1,5 @@
---
# Source: prometheus/templates/server/serviceaccount.yaml
# Source: prometheus/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
@ -7,7 +7,7 @@ metadata:
component: "server"
app: prometheus
release: prometheus
chart: prometheus-15.8.6
chart: prometheus-19.0.2
heritage: Helm
name: prometheus-server
namespace: default

View File

@ -31,33 +31,29 @@ resources:
- inflated/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
- inflated/ingress-nginx/templates/clusterrolebinding.yaml
# Prometheus
- inflated/prometheus/charts/prometheus-node-exporter/templates/daemonset.yaml
- inflated/prometheus/charts/prometheus-node-exporter/templates/serviceaccount.yaml
- inflated/prometheus/charts/prometheus-node-exporter/templates/service.yaml
- inflated/prometheus/charts/alertmanager/templates/serviceaccount.yaml
- inflated/prometheus/charts/alertmanager/templates/services.yaml
- inflated/prometheus/charts/alertmanager/templates/tests/test-connection.yaml
- inflated/prometheus/charts/alertmanager/templates/configmap.yaml
- inflated/prometheus/charts/alertmanager/templates/statefulset.yaml
- inflated/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml
- inflated/prometheus/charts/kube-state-metrics/templates/deployment.yaml
- inflated/prometheus/charts/kube-state-metrics/templates/role.yaml
- inflated/prometheus/charts/kube-state-metrics/templates/service.yaml
- inflated/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml
- inflated/prometheus/templates/node-exporter/daemonset.yaml
- inflated/prometheus/templates/node-exporter/serviceaccount.yaml
- inflated/prometheus/templates/node-exporter/svc.yaml
- inflated/prometheus/templates/pushgateway/serviceaccount.yaml
- inflated/prometheus/templates/pushgateway/service.yaml
- inflated/prometheus/templates/pushgateway/clusterrole.yaml
- inflated/prometheus/templates/pushgateway/deploy.yaml
- inflated/prometheus/templates/pushgateway/clusterrolebinding.yaml
- inflated/prometheus/templates/alertmanager/serviceaccount.yaml
- inflated/prometheus/templates/alertmanager/service.yaml
- inflated/prometheus/templates/alertmanager/clusterrole.yaml
- inflated/prometheus/templates/alertmanager/cm.yaml
- inflated/prometheus/templates/alertmanager/deploy.yaml
- inflated/prometheus/templates/alertmanager/clusterrolebinding.yaml
- inflated/prometheus/templates/alertmanager/pvc.yaml
- inflated/prometheus/templates/server/serviceaccount.yaml
- inflated/prometheus/templates/server/service.yaml
- inflated/prometheus/templates/server/clusterrole.yaml
- inflated/prometheus/templates/server/cm.yaml
- inflated/prometheus/templates/server/deploy.yaml
- inflated/prometheus/templates/server/clusterrolebinding.yaml
- inflated/prometheus/templates/server/pvc.yaml
- inflated/prometheus/charts/prometheus-pushgateway/templates/serviceaccount.yaml
- inflated/prometheus/charts/prometheus-pushgateway/templates/deployment.yaml
- inflated/prometheus/charts/prometheus-pushgateway/templates/service.yaml
- inflated/prometheus/templates/serviceaccount.yaml
- inflated/prometheus/templates/service.yaml
- inflated/prometheus/templates/clusterrole.yaml
- inflated/prometheus/templates/cm.yaml
- inflated/prometheus/templates/deploy.yaml
- inflated/prometheus/templates/clusterrolebinding.yaml
- inflated/prometheus/templates/pvc.yaml
# Grafana
- inflated/grafana/templates/serviceaccount.yaml
- inflated/grafana/templates/dashboards-json-configmap.yaml
@ -66,7 +62,6 @@ resources:
- inflated/grafana/templates/role.yaml
- inflated/grafana/templates/service.yaml
- inflated/grafana/templates/clusterrole.yaml
- inflated/grafana/templates/podsecuritypolicy.yaml
- inflated/grafana/templates/configmap.yaml
- inflated/grafana/templates/clusterrolebinding.yaml
# kubernetes-replicator