feat(monitoring): Manifests for Grafana/Prometheus/Loki/Promtail

Add manifests and basic configuration for all monitoring and visualization services
Tanguy Herbron 2022-09-15 00:17:21 +02:00
parent 5db3f92c15
commit cc08c4bbc9
29 changed files with 974 additions and 81 deletions

@@ -1,27 +0,0 @@
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: internal-ipwhitelist
spec:
ipWhiteList:
sourceRange:
- 10.10.0.1/24
- 10.20.0.1/24
- 10.42.1.1/24
ipStrategy:
depth: 0
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: traefik-dashboard
spec:
entryPoints:
- websecure
routes:
- kind: Rule
match: Host(`traefik.k3s.beta`)
services:
- name: api@internal
kind: TraefikService

@@ -1,54 +0,0 @@
ports:
admin:
port: 8080
expose: true
exposePort: 8080
protocol: TCP
minecrafttcp:
port: 25565
expose: true
exposePort: 25565
protocol: TCP
web:
redirectTo: websecure
additionalArguments:
- --log.level=DEBUG
- --entrypoints.websecure.http.tls=true
- --entrypoints.websecure.http.tls.certresolver=letsencrypt
- --entrypoints.websecure.http.tls.domains[0].main=beta.halia.dev
- --entrypoints.websecure.http.tls.domains[0].sans=*.beta.halia.dev
- --certificatesresolvers.letsencrypt.acme.tlschallenge=true
- --certificatesresolvers.letsencrypt.acme.dnschallenge=true
- --certificatesresolvers.letsencrypt.acme.dnschallenge.provider=ovh
- --certificatesresolvers.letsencrypt.acme.dnschallenge.resolvers=1.1.1.1
- --certificatesresolvers.letsencrypt.acme.email=tanguy.herbron@outlook.com
- --certificatesresolvers.letsencrypt.acme.storage=/certs/acme.json
env:
- name: OVH_APPLICATION_KEY
valueFrom:
secretKeyRef:
key: appKey
name: ovh-api-credentials
- name: OVH_APPLICATION_SECRET
valueFrom:
secretKeyRef:
key: appSecret
name: ovh-api-credentials
- name: OVH_CONSUMER_KEY
valueFrom:
secretKeyRef:
key: consumerKey
name: ovh-api-credentials
- name: OVH_ENDPOINT
valueFrom:
secretKeyRef:
key: endpoint
name: ovh-api-credentials
persistence:
enabled: true
path: /certs
size: 128Mi
storageClass: "local-path"

@@ -0,0 +1,16 @@
---
# Source: loki-stack/charts/grafana/templates/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
name: loki-grafana-clusterrole
rules:
- apiGroups: [""] # "" indicates the core API group
resources: ["configmaps", "secrets"]
verbs: ["get", "watch", "list"]

@@ -0,0 +1,20 @@
---
# Source: loki-stack/charts/grafana/templates/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: loki-grafana-clusterrolebinding
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
subjects:
- kind: ServiceAccount
name: loki-grafana
namespace: monitoring
roleRef:
kind: ClusterRole
name: loki-grafana-clusterrole
apiGroup: rbac.authorization.k8s.io

@@ -0,0 +1,26 @@
---
# Source: loki-stack/charts/grafana/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: loki-grafana
namespace: monitoring
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
data:
grafana.ini: |
[analytics]
check_for_updates = true
[grafana_net]
url = https://grafana.net
[log]
mode = console
[paths]
data = /var/lib/grafana/
logs = /var/log/grafana
plugins = /var/lib/grafana/plugins
provisioning = /etc/grafana/provisioning

@@ -0,0 +1,29 @@
---
# Source: loki-stack/templates/datasources.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: loki-loki-stack
namespace: monitoring
labels:
app: loki-stack
chart: loki-stack-2.8.2
release: loki
heritage: Helm
grafana_datasource: "1"
data:
loki-stack-datasource.yaml: |-
apiVersion: 1
datasources:
- name: Loki
type: loki
access: proxy
url: "http://loki:3100"
version: 1
isDefault: true
- name: Prometheus
type: prometheus
access: proxy
url: "http://prometheus-svc:3100"
version: 1
isDefault: false

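Note: Grafana does not read this ConfigMap directly. The k8s-sidecar container in the Grafana Deployment below watches for ConfigMaps labelled grafana_datasource (its LABEL env var) and copies their contents into /etc/grafana/provisioning/datasources. The Prometheus URL has to match the prometheus-svc Service defined later in this commit, which listens on 9090 (3100 is Loki's port). Any further ConfigMap carrying the same label gets synced the same way; a minimal sketch with hypothetical names, assuming an Alertmanager were actually deployed at the address the Prometheus config references:

apiVersion: v1
kind: ConfigMap
metadata:
  name: extra-datasource          # hypothetical name
  namespace: monitoring
  labels:
    grafana_datasource: "1"       # the label the sidecar watches
data:
  extra-datasource.yaml: |-
    apiVersion: 1
    datasources:
      - name: Alertmanager        # hypothetical; no Alertmanager manifest is in this commit
        type: alertmanager
        access: proxy
        url: "http://alertmanager.monitoring.svc:9093"
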
@@ -0,0 +1,131 @@
---
# Source: loki-stack/charts/grafana/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: loki-grafana
namespace: monitoring
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
strategy:
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
annotations:
checksum/config: ab83ab2703f4417b0cae9771e0b48e1607056d6adac4d9d92f9b1960779034f5
checksum/dashboards-json-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
checksum/sc-dashboard-provider-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
checksum/secret: a8dec7c19ea590ef9d5a0075b8ed84bdf3a82ce47d9c86f5caada045396ab392
spec:
serviceAccountName: loki-grafana
automountServiceAccountToken: true
securityContext:
fsGroup: 472
runAsGroup: 472
runAsUser: 472
enableServiceLinks: true
containers:
- name: grafana-sc-datasources
image: "quay.io/kiwigrid/k8s-sidecar:1.15.6"
imagePullPolicy: IfNotPresent
env:
- name: METHOD
value: WATCH
- name: LABEL
value: "grafana_datasource"
- name: FOLDER
value: "/etc/grafana/provisioning/datasources"
- name: RESOURCE
value: "both"
- name: REQ_USERNAME
valueFrom:
secretKeyRef:
name: loki-grafana
key: admin-user
- name: REQ_PASSWORD
valueFrom:
secretKeyRef:
name: loki-grafana
key: admin-password
- name: REQ_URL
value: http://localhost:3000/api/admin/provisioning/datasources/reload
- name: REQ_METHOD
value: POST
resources:
{}
volumeMounts:
- name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources"
- name: grafana
image: "grafana/grafana:8.3.5"
imagePullPolicy: IfNotPresent
volumeMounts:
- name: config
mountPath: "/etc/grafana/grafana.ini"
subPath: grafana.ini
- name: storage
mountPath: "/var/lib/grafana"
- name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources"
ports:
- name: service
containerPort: 80
protocol: TCP
- name: grafana
containerPort: 3000
protocol: TCP
env:
- name: GF_SECURITY_ADMIN_USER
valueFrom:
secretKeyRef:
name: loki-grafana
key: admin-user
- name: GF_SECURITY_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: loki-grafana
key: admin-password
- name: GF_PATHS_DATA
value: /var/lib/grafana/
- name: GF_PATHS_LOGS
value: /var/log/grafana
- name: GF_PATHS_PLUGINS
value: /var/lib/grafana/plugins
- name: GF_PATHS_PROVISIONING
value: /etc/grafana/provisioning
livenessProbe:
failureThreshold: 10
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 60
timeoutSeconds: 30
readinessProbe:
httpGet:
path: /api/health
port: 3000
resources:
{}
volumes:
- name: config
configMap:
name: loki-grafana
- name: storage
emptyDir: {}
- name: sc-datasources-volume
emptyDir: {}

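Note: this Deployment reads admin-user and admin-password from a Secret named loki-grafana that is not part of this commit (the Helm chart normally generates it). A minimal hand-written equivalent, with placeholder credentials, could look like:

apiVersion: v1
kind: Secret
metadata:
  name: loki-grafana
  namespace: monitoring
type: Opaque
stringData:
  admin-user: admin          # placeholder, not from this commit
  admin-password: change-me  # placeholder; set a real password
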
@@ -0,0 +1,51 @@
---
# Source: loki-stack/charts/grafana/templates/podsecuritypolicy.yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: loki-grafana
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
# Default set from Docker, with DAC_OVERRIDE and CHOWN
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'csi'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
readOnlyRootFilesystem: false

@@ -0,0 +1,18 @@
---
# Source: loki-stack/charts/grafana/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: loki-grafana
namespace: monitoring
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [loki-grafana]

@@ -0,0 +1,21 @@
---
# Source: loki-stack/charts/grafana/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: loki-grafana
namespace: monitoring
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: loki-grafana
subjects:
- kind: ServiceAccount
name: loki-grafana
namespace: monitoring

@@ -0,0 +1,24 @@
---
# Source: loki-stack/charts/grafana/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: loki-grafana
namespace: monitoring
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- name: service
port: 80
protocol: TCP
targetPort: 3000
selector:
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki

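Note: the Service is ClusterIP-only, so Grafana is reachable inside the cluster but not exposed. Following the Traefik IngressRoute convention used elsewhere in this repository, a sketch with a hypothetical hostname:

apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: grafana
  namespace: monitoring
spec:
  entryPoints:
    - websecure
  routes:
    - kind: Rule
      match: Host(`grafana.k3s.beta`)  # hypothetical, mirroring traefik.k3s.beta
      services:
        - name: loki-grafana
          port: 80
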
@@ -0,0 +1,13 @@
---
# Source: loki-stack/charts/grafana/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
name: loki-grafana
namespace: monitoring

monitoring/loki/role.yaml

@@ -0,0 +1,17 @@
---
# Source: loki-stack/charts/loki/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: loki
namespace: monitoring
labels:
app: loki
chart: loki-2.16.0
release: loki
heritage: Helm
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [loki]

@@ -0,0 +1,19 @@
---
# Source: loki-stack/charts/loki/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: loki
namespace: monitoring
labels:
app: loki
chart: loki-2.16.0
release: loki
heritage: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: loki
subjects:
- kind: ServiceAccount
name: loki

@@ -0,0 +1,23 @@
---
# Source: loki-stack/charts/loki/templates/service-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: loki-headless
namespace: monitoring
labels:
app: loki
chart: loki-2.16.0
release: loki
heritage: Helm
variant: headless
spec:
clusterIP: None
ports:
- port: 3100
protocol: TCP
name: http-metrics
targetPort: http-metrics
selector:
app: loki
release: loki

@@ -0,0 +1,24 @@
---
# Source: loki-stack/charts/loki/templates/service-memberlist.yaml
apiVersion: v1
kind: Service
metadata:
name: loki-memberlist
namespace: monitoring
labels:
app: loki
chart: loki-2.16.0
release: loki
heritage: Helm
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: http
port: 7946
targetPort: memberlist-port
protocol: TCP
selector:
app: loki
release: loki

@@ -0,0 +1,24 @@
---
# Source: loki-stack/charts/loki/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: loki
namespace: monitoring
labels:
app: loki
chart: loki-2.16.0
release: loki
heritage: Helm
annotations:
{}
spec:
type: ClusterIP
ports:
- port: 3100
protocol: TCP
name: http-metrics
targetPort: http-metrics
selector:
app: loki
release: loki

@@ -0,0 +1,15 @@
---
# Source: loki-stack/charts/loki/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: loki
chart: loki-2.16.0
release: loki
heritage: Helm
annotations:
{}
name: loki
namespace: monitoring
automountServiceAccountToken: true

@@ -0,0 +1,97 @@
---
# Source: loki-stack/charts/loki/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: loki
namespace: monitoring
labels:
app: loki
chart: loki-2.16.0
release: loki
heritage: Helm
annotations:
{}
spec:
podManagementPolicy: OrderedReady
replicas: 1
selector:
matchLabels:
app: loki
release: loki
serviceName: loki-headless
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app: loki
name: loki
release: loki
annotations:
checksum/config: 70f817aa5a2dd5f771aca66233ce0b140c925212f36795fdeb95102ca96db046
prometheus.io/port: http-metrics
prometheus.io/scrape: "true"
spec:
serviceAccountName: loki
securityContext:
fsGroup: 10001
runAsGroup: 10001
runAsNonRoot: true
runAsUser: 10001
initContainers:
[]
containers:
- name: loki
image: "grafana/loki:2.6.1"
imagePullPolicy: IfNotPresent
args:
- "-config.file=/etc/loki/loki.yaml"
volumeMounts:
- name: tmp
mountPath: /tmp
- name: config
mountPath: /etc/loki
- name: storage
mountPath: "/data"
subPath:
ports:
- name: http-metrics
containerPort: 3100
protocol: TCP
- name: grpc
containerPort: 9095
protocol: TCP
- name: memberlist-port
containerPort: 7946
protocol: TCP
livenessProbe:
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 45
readinessProbe:
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 45
resources:
{}
securityContext:
readOnlyRootFilesystem: true
env:
nodeSelector:
{}
affinity:
{}
tolerations:
[]
terminationGracePeriodSeconds: 4800
volumes:
- name: tmp
emptyDir: {}
- name: config
secret:
secretName: loki
- name: storage
emptyDir: {}

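Note: the StatefulSet mounts /etc/loki from a Secret named loki that this commit does not include (the chart renders it separately), and its storage volume is an emptyDir, so ingested logs do not survive rescheduling. A minimal single-binary configuration, sketched from the upstream Loki 2.6 local-config defaults and adjusted to the /data mount above:

apiVersion: v1
kind: Secret
metadata:
  name: loki
  namespace: monitoring
stringData:
  loki.yaml: |
    auth_enabled: false
    server:
      http_listen_port: 3100
    common:
      path_prefix: /data/loki
      storage:
        filesystem:
          chunks_directory: /data/loki/chunks
          rules_directory: /data/loki/rules
      replication_factor: 1
      ring:
        kvstore:
          store: inmemory
    schema_config:
      configs:
        - from: 2020-10-24
          store: boltdb-shipper
          object_store: filesystem
          schema: v11
          index:
            prefix: index_
            period: 24h
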
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: monitoring

@@ -0,0 +1,34 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus
rules:
- apiGroups: [""]
resources:
- nodes
- nodes/proxy
- services
- endpoints
- pods
verbs: ["get", "list", "watch"]
- apiGroups:
- extensions
resources:
- ingresses
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus
subjects:
- kind: ServiceAccount
name: default
namespace: monitoring

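Note: the binding grants these permissions to the default ServiceAccount in monitoring, which is the account the Prometheus Deployment below runs under since it sets no serviceAccountName. A dedicated account would be a common refinement; a sketch (name hypothetical):

apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: monitoring
---
# Then point the ClusterRoleBinding subject at this account and set
# spec.template.spec.serviceAccountName: prometheus in the Deployment.
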
@@ -0,0 +1,159 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: prometheus-server-conf
labels:
name: prometheus-server-conf
namespace: monitoring
data:
prometheus.rules: |-
groups:
- name: devopscube demo alert
rules:
- alert: HighPodMemory
expr: sum(container_memory_usage_bytes) > 1
for: 1m
labels:
severity: slack
annotations:
summary: High Memory Usage
prometheus.yml: |-
global:
scrape_interval: 5s
evaluation_interval: 5s
rule_files:
- /etc/prometheus/prometheus.rules
alerting:
alertmanagers:
- scheme: http
static_configs:
- targets:
- "alertmanager.monitoring.svc:9093"
scrape_configs:
- job_name: 'node-exporter'
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- source_labels: [__meta_kubernetes_endpoints_name]
regex: 'node-exporter'
action: keep
- job_name: 'kubernetes-apiservers'
kubernetes_sd_configs:
- role: endpoints
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
action: keep
regex: default;kubernetes;https
- job_name: 'kubernetes-nodes'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics
- job_name: 'kubernetes-pods'
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
action: keep
regex: true
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
- job_name: 'kube-state-metrics'
static_configs:
- targets: ['kube-state-metrics.kube-system.svc.cluster.local:8080']
- job_name: 'kubernetes-cadvisor'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
- job_name: 'kubernetes-service-endpoints'
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
action: keep
regex: true
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
action: replace
target_label: __scheme__
regex: (https?)
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
action: replace
target_label: __address__
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_service_name]
action: replace
target_label: kubernetes_name

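Note: with the kubernetes-pods job above, pods opt in to scraping through annotations: the keep rule requires prometheus.io/scrape, and the other relabel rules rewrite the metrics path and port from prometheus.io/path and prometheus.io/port. A hypothetical pod template fragment that this config would pick up:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app               # hypothetical workload
spec:
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
      annotations:
        prometheus.io/scrape: "true"    # matched by the keep rule
        prometheus.io/path: "/metrics"  # rewrites __metrics_path__
        prometheus.io/port: "8080"      # rewritten into __address__
    spec:
      containers:
        - name: app
          image: example/app:latest     # hypothetical image
          ports:
            - containerPort: 8080
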
@@ -0,0 +1,45 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: prometheus
namespace: monitoring
labels:
app: prometheus
spec:
replicas: 1
selector:
matchLabels:
app: prometheus
template:
metadata:
labels:
app: prometheus
spec:
containers:
- name: prometheus
image: prom/prometheus
args:
- "--storage.tsdb.retention.time=12h"
- "--config.file=/etc/prometheus/prometheus.yml"
- "--storage.tsdb.path=/prometheus/"
ports:
- containerPort: 9090
resources:
requests:
cpu: 500m
memory: 500M
limits:
cpu: 1
memory: 1Gi
volumeMounts:
- name: prometheus-config-volume
mountPath: /etc/prometheus
- name: prometheus-storage-volume
mountPath: /prometheus/
volumes:
- name: prometheus-config-volume
configMap:
defaultMode: 420
name: prometheus-server-conf
- name: prometheus-storage-volume
emptyDir: {}

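Note: prometheus-storage-volume is an emptyDir, so even the 12h retention window only lasts as long as the pod. One way to keep data across restarts would be a PersistentVolumeClaim; a sketch using the local-path StorageClass already referenced elsewhere in this repository (name and size are assumptions):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: prometheus-storage         # hypothetical name
  namespace: monitoring
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: local-path
  resources:
    requests:
      storage: 8Gi                 # assumed size
---
# In the Deployment, the storage volume would then become:
# - name: prometheus-storage-volume
#   persistentVolumeClaim:
#     claimName: prometheus-storage
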
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: prometheus-svc
namespace: monitoring
spec:
ports:
- name: http
port: 9090
protocol: TCP
targetPort: 9090
selector:
app: prometheus

@@ -0,0 +1,25 @@
---
# Source: loki-stack/charts/promtail/templates/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: loki-promtail
labels:
helm.sh/chart: promtail-6.3.0
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.6.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- nodes
- nodes/proxy
- services
- endpoints
- pods
verbs:
- get
- watch
- list

@@ -0,0 +1,20 @@
---
# Source: loki-stack/charts/promtail/templates/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: loki-promtail
labels:
helm.sh/chart: promtail-6.3.0
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.6.1"
app.kubernetes.io/managed-by: Helm
subjects:
- kind: ServiceAccount
name: loki-promtail
namespace: monitoring
roleRef:
kind: ClusterRole
name: loki-promtail
apiGroup: rbac.authorization.k8s.io

@@ -0,0 +1,93 @@
---
# Source: loki-stack/charts/promtail/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: loki-promtail
namespace: monitoring
labels:
helm.sh/chart: promtail-6.3.0
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.6.1"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: loki
updateStrategy:
{}
template:
metadata:
labels:
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: loki
annotations:
checksum/config: 807310f261dd2585fdcb196f53c15ad3295af56ceac4869de7beaa331ecc9a3c
spec:
serviceAccountName: loki-promtail
securityContext:
runAsGroup: 0
runAsUser: 0
containers:
- name: promtail
image: "docker.io/grafana/promtail:2.6.1"
imagePullPolicy: IfNotPresent
args:
- "-config.file=/etc/promtail/promtail.yaml"
volumeMounts:
- name: config
mountPath: /etc/promtail
- mountPath: /run/promtail
name: run
- mountPath: /var/lib/docker/containers
name: containers
readOnly: true
- mountPath: /var/log/pods
name: pods
readOnly: true
env:
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
ports:
- name: http-metrics
containerPort: 3101
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
readinessProbe:
failureThreshold: 5
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
volumes:
- name: config
secret:
secretName: loki-promtail
- hostPath:
path: /run/promtail
name: run
- hostPath:
path: /var/lib/docker/containers
name: containers
- hostPath:
path: /var/log/pods
name: pods

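Note: like Loki, Promtail mounts its configuration from a Secret (loki-promtail) that is not part of this commit. A minimal sketch consistent with the mounts above — positions kept under the /run/promtail hostPath, logs read from /var/log/pods, and entries pushed to the loki Service on 3100:

apiVersion: v1
kind: Secret
metadata:
  name: loki-promtail
  namespace: monitoring
stringData:
  promtail.yaml: |
    server:
      http_listen_port: 3101
    positions:
      filename: /run/promtail/positions.yaml
    clients:
      - url: http://loki:3100/loki/api/v1/push
    scrape_configs:
      - job_name: kubernetes-pods
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          - source_labels: [__meta_kubernetes_pod_node_name]
            target_label: __host__
          - source_labels: [__meta_kubernetes_namespace]
            target_label: namespace
          - source_labels: [__meta_kubernetes_pod_name]
            target_label: pod
          - source_labels: [__meta_kubernetes_pod_uid, __meta_kubernetes_pod_container_name]
            separator: /
            target_label: __path__
            replacement: /var/log/pods/*$1/*.log
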
@@ -0,0 +1,13 @@
---
# Source: loki-stack/charts/promtail/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: loki-promtail
namespace: monitoring
labels:
helm.sh/chart: promtail-6.3.0
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.6.1"
app.kubernetes.io/managed-by: Helm