# k8s-manifests/monitoring/deployments.yaml
---
# Grafana deployment (Helm release kube-prometheus-stack, chart grafana-11.3.3).
# Pod runs two k8s-sidecar watchers plus the Grafana server itself.
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: '7'
    meta.helm.sh/release-name: kube-prometheus-stack
    meta.helm.sh/release-namespace: monitoring
  labels:
    app.kubernetes.io/instance: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: grafana
    app.kubernetes.io/version: 12.4.1
    helm.sh/chart: grafana-11.3.3
  name: kube-prometheus-stack-grafana
  namespace: monitoring
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/instance: kube-prometheus-stack
      app.kubernetes.io/name: grafana
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        checksum/config: 8fbcc9c58b9e3857cbd07fc46643f422b04059fef425f2297da3a12920e3c0ee
        checksum/sc-dashboard-provider-config: e70bf6a851099d385178a76de9757bb0bef8299da6d8443602590e44f05fdf24
        checksum/secret: 9014ce2196439b89fe418661428f1bc68bdef9d82c4d4a793eb538ea5fa936c5
        kubectl.kubernetes.io/default-container: grafana
        kubectl.kubernetes.io/restartedAt: '2026-03-26T10:34:36Z'
      labels:
        app.kubernetes.io/instance: kube-prometheus-stack
        app.kubernetes.io/name: grafana
        app.kubernetes.io/version: 12.4.1
        helm.sh/chart: grafana-11.3.3
    spec:
      automountServiceAccountToken: true
      containers:
        # Sidecar: watches ConfigMaps/Secrets labelled grafana_dashboard=1
        # in all namespaces, writes them to /tmp/dashboards, then POSTs the
        # dashboards provisioning reload endpoint on localhost:3000.
        - env:
            - name: METHOD
              value: WATCH
            - name: LABEL
              value: grafana_dashboard
            - name: LABEL_VALUE
              value: '1'
            - name: FOLDER
              value: /tmp/dashboards
            - name: RESOURCE
              value: both
            - name: NAMESPACE
              value: ALL
            - name: REQ_USERNAME
              valueFrom:
                secretKeyRef:
                  key: admin-user
                  name: kube-prometheus-stack-grafana
            - name: REQ_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: admin-password
                  name: kube-prometheus-stack-grafana
            - name: REQ_URL
              value: http://localhost:3000/api/admin/provisioning/dashboards/reload
            - name: REQ_METHOD
              value: POST
          image: quay.io/kiwigrid/k8s-sidecar:2.5.0
          imagePullPolicy: IfNotPresent
          resources: {}
          name: grafana-sc-dashboard
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            seccompProfile:
              type: RuntimeDefault
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /tmp/dashboards
              name: sc-dashboard-volume
        # Sidecar: same pattern for grafana_datasource=1 resources, synced
        # into the provisioning/datasources directory shared with Grafana.
        - env:
            - name: METHOD
              value: WATCH
            - name: LABEL
              value: grafana_datasource
            - name: LABEL_VALUE
              value: '1'
            - name: FOLDER
              value: /etc/grafana/provisioning/datasources
            - name: RESOURCE
              value: both
            - name: REQ_USERNAME
              valueFrom:
                secretKeyRef:
                  key: admin-user
                  name: kube-prometheus-stack-grafana
            - name: REQ_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: admin-password
                  name: kube-prometheus-stack-grafana
            - name: REQ_URL
              value: http://localhost:3000/api/admin/provisioning/datasources/reload
            - name: REQ_METHOD
              value: POST
          image: quay.io/kiwigrid/k8s-sidecar:2.5.0
          imagePullPolicy: IfNotPresent
          name: grafana-sc-datasources
          resources: {}
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            seccompProfile:
              type: RuntimeDefault
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /etc/grafana/provisioning/datasources
              name: sc-datasources-volume
        # Main Grafana server. Admin credentials come from the
        # kube-prometheus-stack-grafana Secret; data on a PVC.
        - env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: GF_SECURITY_ADMIN_USER
              valueFrom:
                secretKeyRef:
                  key: admin-user
                  name: kube-prometheus-stack-grafana
            - name: GF_SECURITY_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: admin-password
                  name: kube-prometheus-stack-grafana
            - name: GF_PATHS_DATA
              value: /var/lib/grafana/
            - name: GF_PATHS_LOGS
              value: /var/log/grafana
            - name: GF_PATHS_PLUGINS
              value: /var/lib/grafana/plugins
            - name: GF_PATHS_PROVISIONING
              value: /etc/grafana/provisioning
            - name: GF_UNIFIED_STORAGE_INDEX_PATH
              value: /var/lib/grafana-search/bleve
          image: docker.io/grafana/grafana:12.4.1
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 10
            httpGet:
              path: /api/health
              port: grafana
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 30
          name: grafana
          ports:
            - containerPort: 3000
              name: grafana
              protocol: TCP
            - containerPort: 9094
              name: gossip-tcp
              protocol: TCP
            - containerPort: 9094
              name: gossip-udp
              protocol: UDP
            - containerPort: 6060
              name: profiling
              protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /api/health
              port: grafana
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          resources:
            limits:
              cpu: 500m
              memory: 512Mi
            requests:
              cpu: 200m
              memory: 256Mi
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            privileged: false
            seccompProfile:
              type: RuntimeDefault
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /etc/grafana/grafana.ini
              name: config
              subPath: grafana.ini
            - mountPath: /var/lib/grafana
              name: storage
            - mountPath: /var/lib/grafana-search
              name: search
            - mountPath: /tmp/dashboards
              name: sc-dashboard-volume
            - mountPath: /etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml
              name: sc-dashboard-provider
              subPath: provider.yaml
            - mountPath: /etc/grafana/provisioning/datasources
              name: sc-datasources-volume
      dnsPolicy: ClusterFirst
      enableServiceLinks: true
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        fsGroup: 472
        runAsGroup: 472
        runAsNonRoot: true
        runAsUser: 472
      serviceAccount: kube-prometheus-stack-grafana
      serviceAccountName: kube-prometheus-stack-grafana
      shareProcessNamespace: false
      terminationGracePeriodSeconds: 30
      volumes:
        - configMap:
            defaultMode: 420
            name: kube-prometheus-stack-grafana
          name: config
        - name: storage
          persistentVolumeClaim:
            claimName: kube-prometheus-stack-grafana
        - emptyDir: {}
          name: search
        - emptyDir: {}
          name: sc-dashboard-volume
        - configMap:
            defaultMode: 420
            name: kube-prometheus-stack-grafana-config-dashboards
          name: sc-dashboard-provider
        - emptyDir: {}
          name: sc-datasources-volume
---
# kube-state-metrics deployment (chart kube-state-metrics-7.2.1).
# Exposes cluster object state metrics on :8080 and self-metrics on :8081.
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: '3'
    meta.helm.sh/release-name: kube-prometheus-stack
    meta.helm.sh/release-namespace: monitoring
  labels:
    app.kubernetes.io/component: metrics
    app.kubernetes.io/instance: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kube-state-metrics
    app.kubernetes.io/part-of: kube-state-metrics
    app.kubernetes.io/version: 2.18.0
    helm.sh/chart: kube-state-metrics-7.2.1
    release: kube-prometheus-stack
  name: kube-prometheus-stack-kube-state-metrics
  namespace: monitoring
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/instance: kube-prometheus-stack
      app.kubernetes.io/name: kube-state-metrics
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/component: metrics
        app.kubernetes.io/instance: kube-prometheus-stack
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: kube-state-metrics
        app.kubernetes.io/part-of: kube-state-metrics
        app.kubernetes.io/version: 2.18.0
        helm.sh/chart: kube-state-metrics-7.2.1
        release: kube-prometheus-stack
    spec:
      automountServiceAccountToken: true
      containers:
        - args:
            - --port=8080
            # Explicit allow-list of resource kinds to collect metrics for.
            - --resources=certificatesigningrequests,configmaps,cronjobs,daemonsets,deployments,endpointslices,horizontalpodautoscalers,ingresses,jobs,leases,limitranges,mutatingwebhookconfigurations,namespaces,networkpolicies,nodes,persistentvolumeclaims,persistentvolumes,poddisruptionbudgets,pods,replicasets,replicationcontrollers,resourcequotas,secrets,services,statefulsets,storageclasses,validatingwebhookconfigurations,volumeattachments
          image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.18.0
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /livez
              port: http
              scheme: HTTP
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          name: kube-state-metrics
          ports:
            - containerPort: 8080
              name: http
              protocol: TCP
            - containerPort: 8081
              name: metrics
              protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /readyz
              port: metrics
              scheme: HTTP
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          resources: {}
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            readOnlyRootFilesystem: true
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        fsGroup: 65534
        runAsGroup: 65534
        runAsNonRoot: true
        runAsUser: 65534
        seccompProfile:
          type: RuntimeDefault
      serviceAccount: kube-prometheus-stack-kube-state-metrics
      serviceAccountName: kube-prometheus-stack-kube-state-metrics
      terminationGracePeriodSeconds: 30
---
# Prometheus Operator deployment (chart kube-prometheus-stack-82.13.5).
# Serves its admission/health endpoints over TLS on :10250 using the
# kube-prometheus-stack-admission certificate Secret.
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: '3'
    meta.helm.sh/release-name: kube-prometheus-stack
    meta.helm.sh/release-namespace: monitoring
  labels:
    app: kube-prometheus-stack-operator
    app.kubernetes.io/component: prometheus-operator
    app.kubernetes.io/instance: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
    app.kubernetes.io/part-of: kube-prometheus-stack
    app.kubernetes.io/version: 82.13.5
    chart: kube-prometheus-stack-82.13.5
    heritage: Helm
    release: kube-prometheus-stack
  name: kube-prometheus-stack-operator
  namespace: monitoring
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: kube-prometheus-stack-operator
      release: kube-prometheus-stack
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: kube-prometheus-stack-operator
        app.kubernetes.io/component: prometheus-operator
        app.kubernetes.io/instance: kube-prometheus-stack
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
        app.kubernetes.io/part-of: kube-prometheus-stack
        app.kubernetes.io/version: 82.13.5
        chart: kube-prometheus-stack-82.13.5
        heritage: Helm
        release: kube-prometheus-stack
    spec:
      automountServiceAccountToken: true
      containers:
        - args:
            - --kubelet-service=kube-system/kube-prometheus-stack-kubelet
            - --kubelet-endpoints=true
            - --kubelet-endpointslice=false
            - --localhost=127.0.0.1
            - --prometheus-config-reloader=quay.io/prometheus-operator/prometheus-config-reloader:v0.89.0
            # Config-reloader requests/limits set to 0 == unset.
            - --config-reloader-cpu-request=0
            - --config-reloader-cpu-limit=0
            - --config-reloader-memory-request=0
            - --config-reloader-memory-limit=0
            - --thanos-default-base-image=quay.io/thanos/thanos:v0.41.0
            - --secret-field-selector=type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1
            - --web.enable-tls=true
            - --web.cert-file=/cert/cert
            - --web.key-file=/cert/key
            - --web.listen-address=:10250
            - --web.tls-min-version=VersionTLS13
          env:
            # Aggressive Go GC target to reduce resident memory.
            - name: GOGC
              value: '30'
          image: quay.io/prometheus-operator/prometheus-operator:v0.89.0
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: https
              scheme: HTTPS
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          name: kube-prometheus-stack
          ports:
            - containerPort: 10250
              name: https
              protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: https
              scheme: HTTPS
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          resources: {}
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            readOnlyRootFilesystem: true
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /cert
              name: tls-secret
              readOnly: true
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        fsGroup: 65534
        runAsGroup: 65534
        runAsNonRoot: true
        runAsUser: 65534
        seccompProfile:
          type: RuntimeDefault
      serviceAccount: kube-prometheus-stack-operator
      serviceAccountName: kube-prometheus-stack-operator
      terminationGracePeriodSeconds: 30
      volumes:
        - name: tls-secret
          secret:
            defaultMode: 420
            secretName: kube-prometheus-stack-admission
---
# Uptime Kuma status-monitoring deployment (not Helm-managed).
# Recreate strategy: the single replica holds an RWO-style data PVC.
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: '5'
  name: uptime-kuma
  namespace: monitoring
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: uptime-kuma
  strategy:
    type: Recreate
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/restartedAt: '2026-03-25T17:45:20Z'
      labels:
        app: uptime-kuma
    spec:
      containers:
        - image: louislam/uptime-kuma:1
          imagePullPolicy: IfNotPresent
          # Long initial delay: app performs startup work before serving.
          # NOTE(review): unlike the other deployments in this file, no
          # container securityContext is set here — confirm whether the image
          # can run non-root with capabilities dropped.
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /
              port: 3001
              scheme: HTTP
            initialDelaySeconds: 120
            periodSeconds: 30
            successThreshold: 1
            timeoutSeconds: 1
          name: uptime-kuma
          ports:
            - containerPort: 3001
              protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /
              port: 3001
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          resources:
            limits:
              cpu: 300m
              memory: 256Mi
            requests:
              cpu: 100m
              memory: 128Mi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /app/data
              name: data
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: uptime-kuma-pvc