refactor(legacy): Remove standalone legacy services

Tanguy Herbron 2023-08-01 00:18:45 +02:00
parent 90b1ffad7f
commit 2154743b3b
47 changed files with 0 additions and 39956 deletions

View File

@ -1,146 +0,0 @@
apiVersion: v1
data:
AdGuardHome.yaml: |
bind_host: 0.0.0.0
bind_port: 3000
users:
- name: $USER_NAME
password: $2a$10$$USER_PASSWORD
auth_attempts: 5
block_auth_min: 15
http_proxy: ""
web_session_ttl: 720
dns:
bind_hosts:
- 0.0.0.0
port: 53
statistics_interval: 1
querylog_enabled: true
querylog_file_enabled: true
querylog_interval: 24h
querylog_size_memory: 1000
anonymize_client_ip: false
protection_enabled: true
blocking_mode: default
blocking_ipv4: ""
blocking_ipv6: ""
blocked_response_ttl: 10
parental_block_host: family-block.dns.adguard.com
safebrowsing_block_host: standard-block.dns.adguard.com
ratelimit: 20
ratelimit_whitelist: []
refuse_any: true
upstream_dns:
- https://dns10.quad9.net/dns-query
- 8.8.8.8
- 8.8.4.4
- 1.1.1.1
upstream_dns_file: ""
bootstrap_dns:
- 9.9.9.10
- 149.112.112.10
- 2620:fe::10
- 2620:fe::fe:10
all_servers: true
fastest_addr: false
fastest_timeout: 1s
allowed_clients: []
disallowed_clients: []
blocked_hosts:
- version.bind
- id.server
- hostname.bind
trusted_proxies:
- 127.0.0.0/8
- ::1/128
cache_size: 4194304
cache_ttl_min: 0
cache_ttl_max: 0
cache_optimistic: false
bogus_nxdomain: []
aaaa_disabled: false
enable_dnssec: false
edns_client_subnet: false
max_goroutines: 300
ipset: []
filtering_enabled: true
filters_update_interval: 24
parental_enabled: false
safesearch_enabled: false
safebrowsing_enabled: false
safebrowsing_cache_size: 1048576
safesearch_cache_size: 1048576
parental_cache_size: 1048576
cache_time: 30
rewrites: $DNS_REWRITES
blocked_services:
- tiktok
upstream_timeout: 10s
local_domain_name: lan
resolve_clients: true
use_private_ptr_resolvers: true
local_ptr_upstreams: []
tls:
enabled: false
server_name: ""
force_https: false
port_https: 443
port_dns_over_tls: 853
port_dns_over_quic: 784
port_dnscrypt: 0
dnscrypt_config_file: ""
allow_unencrypted_doh: false
strict_sni_check: false
certificate_chain: ""
private_key: ""
certificate_path: ""
private_key_path: ""
filters:
- enabled: true
url: https://adguardteam.github.io/AdGuardSDNSFilter/Filters/filter.txt
name: AdGuard DNS filter
id: 1
- enabled: true
url: https://adaway.org/hosts.txt
name: AdAway Default Blocklist
id: 2
- enabled: false
url: https://www.malwaredomainlist.com/hostslist/hosts.txt
name: MalwareDomainList.com Hosts List
id: 4
whitelist_filters: []
user_rules:
- '@@||v.oui.sncf^$important'
dhcp:
enabled: false
interface_name: ""
dhcpv4:
gateway_ip: ""
subnet_mask: ""
range_start: ""
range_end: ""
lease_duration: 86400
icmp_timeout_msec: 1000
options: []
dhcpv6:
range_start: ""
lease_duration: 86400
ra_slaac_only: false
ra_allow_slaac: false
clients: []
log_compress: false
log_localtime: false
log_max_backups: 0
log_max_size: 100
log_max_age: 3
log_file: ""
verbose: false
os:
group: ""
user: ""
rlimit_nofile: 0
schema_version: 12
kind: ConfigMap
metadata:
name: adguard-config
namespace: default

View File

@ -1,47 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: adguard
spec:
replicas: 1
selector:
matchLabels:
app: adguard
template:
metadata:
labels:
app: adguard
spec:
initContainers:
- name: config-binder
image: alpine
command: ["sh", "-c", "mkdir -p /adguard/conf && cp /binder/AdGuardHome.yaml /adguard/conf/AdGuardHome.yaml"]
volumeMounts:
- name: adguard-config-volume
mountPath: /binder
- name: adguard-data
mountPath: /adguard
containers:
- name: adguard
image: adguard/adguardhome
ports:
- containerPort: 53
protocol: UDP
- containerPort: 53
protocol: TCP
- containerPort: 3000
protocol: TCP
volumeMounts:
- name: adguard-data
mountPath: /opt/adguardhome/conf
subPath: conf
- name: adguard-data
mountPath: /opt/adguardhome/work
subPath: work
volumes:
- name: adguard-config-volume
configMap:
name: adguard-config
- name: adguard-data
persistentVolumeClaim:
claimName: adguard-pvc

View File

@ -1,18 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: adguard-ingress
annotations:
kubernetes.io/ingress.class: "traefik"
spec:
rules:
- host: adguard.beta.entos
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: adguard-svc
port:
number: 80

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: adguard-pvc
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 1Gi

View File

@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: adguard-svc
spec:
ports:
- name: http
protocol: TCP
port: 80
targetPort: 3000
- name: dns
protocol: UDP
port: 53
selector:
app: adguard
externalIPs:
# Node's physical IP / Internet accessible IP / Wireguard accessible IP
#
# Only exposing the DNS server as the dashboard only listens to the specified
# hostname (cf ingress file)
- 10.11.0.1
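
Since only the DNS port is reachable through the externalIP while the dashboard is tied to the ingress hostname, a quick functional check of this legacy setup would have been to query the resolver directly and hit the dashboard by name. A minimal sketch, assuming the externalIP and the adguard.beta.entos hostname above were reachable from the client:

# Resolve through AdGuard Home on the exposed externalIP (assumed reachable):
dig @10.11.0.1 adguard.beta.entos +short
# The dashboard is only served via the ingress hostname (cf ingress file):
curl -sI http://adguard.beta.entos/ | head -n 1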

View File

@ -1,146 +0,0 @@
apiVersion: v1
data:
AdGuardHome.yaml: |
bind_host: 0.0.0.0
bind_port: 3000
users:
- name: $USER_NAME
password: $2a$10$$USER_PASSWORD
auth_attempts: 5
block_auth_min: 15
http_proxy: ""
web_session_ttl: 720
dns:
bind_hosts:
- 0.0.0.0
port: 53
statistics_interval: 1
querylog_enabled: true
querylog_file_enabled: true
querylog_interval: 24h
querylog_size_memory: 1000
anonymize_client_ip: false
protection_enabled: true
blocking_mode: default
blocking_ipv4: ""
blocking_ipv6: ""
blocked_response_ttl: 10
parental_block_host: family-block.dns.adguard.com
safebrowsing_block_host: standard-block.dns.adguard.com
ratelimit: 20
ratelimit_whitelist: []
refuse_any: true
upstream_dns:
- https://dns10.quad9.net/dns-query
- 8.8.8.8
- 8.8.4.4
- 1.1.1.1
upstream_dns_file: ""
bootstrap_dns:
- 9.9.9.10
- 149.112.112.10
- 2620:fe::10
- 2620:fe::fe:10
all_servers: true
fastest_addr: false
fastest_timeout: 1s
allowed_clients: []
disallowed_clients: []
blocked_hosts:
- version.bind
- id.server
- hostname.bind
trusted_proxies:
- 127.0.0.0/8
- ::1/128
cache_size: 4194304
cache_ttl_min: 0
cache_ttl_max: 0
cache_optimistic: false
bogus_nxdomain: []
aaaa_disabled: false
enable_dnssec: false
edns_client_subnet: false
max_goroutines: 300
ipset: []
filtering_enabled: true
filters_update_interval: 24
parental_enabled: false
safesearch_enabled: false
safebrowsing_enabled: false
safebrowsing_cache_size: 1048576
safesearch_cache_size: 1048576
parental_cache_size: 1048576
cache_time: 30
rewrites: $DNS_REWRITES
blocked_services:
- tiktok
upstream_timeout: 10s
local_domain_name: lan
resolve_clients: true
use_private_ptr_resolvers: true
local_ptr_upstreams: []
tls:
enabled: false
server_name: ""
force_https: false
port_https: 443
port_dns_over_tls: 853
port_dns_over_quic: 784
port_dnscrypt: 0
dnscrypt_config_file: ""
allow_unencrypted_doh: false
strict_sni_check: false
certificate_chain: ""
private_key: ""
certificate_path: ""
private_key_path: ""
filters:
- enabled: true
url: https://adguardteam.github.io/AdGuardSDNSFilter/Filters/filter.txt
name: AdGuard DNS filter
id: 1
- enabled: true
url: https://adaway.org/hosts.txt
name: AdAway Default Blocklist
id: 2
- enabled: false
url: https://www.malwaredomainlist.com/hostslist/hosts.txt
name: MalwareDomainList.com Hosts List
id: 4
whitelist_filters: []
user_rules:
- '@@||v.oui.sncf^$important'
dhcp:
enabled: false
interface_name: ""
dhcpv4:
gateway_ip: ""
subnet_mask: ""
range_start: ""
range_end: ""
lease_duration: 86400
icmp_timeout_msec: 1000
options: []
dhcpv6:
range_start: ""
lease_duration: 86400
ra_slaac_only: false
ra_allow_slaac: false
clients: []
log_compress: false
log_localtime: false
log_max_backups: 0
log_max_size: 100
log_max_age: 3
log_file: ""
verbose: false
os:
group: ""
user: ""
rlimit_nofile: 0
schema_version: 12
kind: ConfigMap
metadata:
name: adguard-config
namespace: default

View File

@ -1,36 +0,0 @@
export USER_NAME=admin
export USER_PASSWORD=password
export DNS_REWRITES="\t- domain: socrates.halia
\t answer: 10.11.0.1
\t- domain: plotinus.halia
\t answer: 10.11.0.3
\t- domain: epicurus.halia
\t answer: 10.11.0.4
\t- domain: pythagoras-a.halia
\t answer: 10.11.0.5
\t- domain: pythagoras-b.halia
\t answer: 10.11.0.6
\t- domain: pythagoras-z.halia
\t answer: 10.11.0.7
\t- domain: archimedes.halia
\t answer: 10.11.0.8
\t- domain: plato.halia
\t answer: 10.11.0.9
\t- domain: platorrent.halia
\t answer: 10.11.0.10
\t- domain: heraclitus.halia
\t answer: 10.11.0.11
\t- domain: '*.entos'
\t answer: 10.11.0.1
\t- domain: diogenes.halia
\t answer: 10.11.0.12
\t- domain: '*.aristotle.halia'
\t answer: 10.11.0.2
\t- domain: aristotle.halia
\t answer: 10.11.0.2
\t- domain: '*.diogenes.halia'
\t answer: 10.11.0.12
\t- domain: k3s.beta
\t answer: 10.10.0.52
\t- domain: '*.k3s.beta'
\t answer: 10.10.0.52"
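
The AdGuard ConfigMap above references $USER_NAME, $USER_PASSWORD and $DNS_REWRITES, so these exports were presumably substituted into the manifest before it was applied. A minimal sketch of that workflow, assuming envsubst and a secrets.sh file holding the export block above (both names are hypothetical, not confirmed by this commit):

# Load the exports, render the placeholders, then apply the result (assumed workflow):
. ./secrets.sh                                    # hypothetical name for the export block above
envsubst < configmap.yaml | kubectl apply -f -    # configmap.yaml = the AdGuard ConfigMap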

View File

@ -1,16 +0,0 @@
---
# Source: loki-stack/charts/grafana/templates/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
name: loki-grafana-clusterrole
rules:
- apiGroups: [""] # "" indicates the core API group
resources: ["configmaps", "secrets"]
verbs: ["get", "watch", "list"]

View File

@ -1,20 +0,0 @@
---
# Source: loki-stack/charts/grafana/templates/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: loki-grafana-clusterrolebinding
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
subjects:
- kind: ServiceAccount
name: loki-grafana
namespace: monitoring
roleRef:
kind: ClusterRole
name: loki-grafana-clusterrole
apiGroup: rbac.authorization.k8s.io

View File

@ -1,26 +0,0 @@
---
# Source: loki-stack/charts/grafana/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: loki-grafana
namespace: monitoring
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
data:
grafana.ini: |
[analytics]
check_for_updates = true
[grafana_net]
url = https://grafana.net
[log]
mode = console
[paths]
data = /var/lib/grafana/
logs = /var/log/grafana
plugins = /var/lib/grafana/plugins
provisioning = /etc/grafana/provisioning

View File

@ -1,29 +0,0 @@
---
# Source: loki-stack/templates/datasources.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: loki-loki-stack
namespace: monitoring
labels:
app: loki-stack
chart: loki-stack-2.8.2
release: loki
heritage: Helm
grafana_datasource: "1"
data:
loki-stack-datasource.yaml: |-
apiVersion: 1
datasources:
- name: Loki
type: loki
access: proxy
url: "http://loki:3100"
version: 1
isDefault: true
- name: Prometheus
type: prometheus
access: proxy
url: "http://prometheus-operated:9090"
version: 1
isDefault: false
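
Besides going through Grafana, the Loki datasource URL above can be exercised directly against the loki service defined later in this stack; a hedged sketch, assuming port-forward access to the monitoring namespace and promtail's default namespace label:

# Port-forward the Loki service, then issue a simple LogQL query against its HTTP API:
kubectl -n monitoring port-forward svc/loki 3100:3100 &
curl -G -s 'http://localhost:3100/loki/api/v1/query_range' \
  --data-urlencode 'query={namespace="monitoring"}' | head -c 300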

View File

@ -1,128 +0,0 @@
---
# Source: loki-stack/charts/grafana/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: loki-grafana
namespace: monitoring
labels:
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
strategy:
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
annotations:
checksum/config: ab83ab2703f4417b0cae9771e0b48e1607056d6adac4d9d92f9b1960779034f5
checksum/dashboards-json-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
checksum/sc-dashboard-provider-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
checksum/secret: a8dec7c19ea590ef9d5a0075b8ed84bdf3a82ce47d9c86f5caada045396ab392
spec:
serviceAccountName: loki-grafana
automountServiceAccountToken: true
securityContext:
fsGroup: 472
runAsGroup: 472
runAsUser: 472
enableServiceLinks: true
containers:
- name: grafana-sc-datasources
image: "quay.io/kiwigrid/k8s-sidecar:1.15.6"
imagePullPolicy: IfNotPresent
env:
- name: METHOD
value: WATCH
- name: LABEL
value: "grafana_datasource"
- name: FOLDER
value: "/etc/grafana/provisioning/datasources"
- name: RESOURCE
value: "both"
- name: REQ_USERNAME
valueFrom:
secretKeyRef:
name: loki-grafana
key: admin-user
- name: REQ_PASSWORD
valueFrom:
secretKeyRef:
name: loki-grafana
key: admin-password
- name: REQ_URL
value: http://localhost:3000/api/admin/provisioning/datasources/reload
- name: REQ_METHOD
value: POST
resources:
{}
volumeMounts:
- name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources"
- name: grafana
image: "grafana/grafana:8.3.5"
imagePullPolicy: IfNotPresent
volumeMounts:
- name: config
mountPath: "/etc/grafana/grafana.ini"
subPath: grafana.ini
- name: storage
mountPath: "/var/lib/grafana"
- name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources"
ports:
- name: service
containerPort: 80
protocol: TCP
- name: grafana
containerPort: 3000
protocol: TCP
env:
- name: GF_SECURITY_ADMIN_USER
valueFrom:
secretKeyRef:
name: loki-grafana
key: admin-user
- name: GF_SECURITY_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: loki-grafana
key: admin-password
- name: GF_PATHS_DATA
value: /var/lib/grafana/
- name: GF_PATHS_LOGS
value: /var/log/grafana
- name: GF_PATHS_PLUGINS
value: /var/lib/grafana/plugins
- name: GF_PATHS_PROVISIONING
value: /etc/grafana/provisioning
livenessProbe:
failureThreshold: 10
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 60
timeoutSeconds: 30
readinessProbe:
httpGet:
path: /api/health
port: 3000
resources:
{}
volumes:
- name: config
configMap:
name: loki-grafana
- name: storage
emptyDir: {}
- name: sc-datasources-volume
emptyDir: {}

View File

@ -1,23 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grafana-ingress
namespace: monitoring
annotations:
kubernetes.io/ingress.class: "traefik"
spec:
tls:
- secretName: grafana-beta-tls
hosts:
- grafana.beta.halia.dev
rules:
- host: grafana.beta.halia.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: loki-grafana
port:
number: 80

View File

@ -1,15 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- clusterrolebinding.yaml
- clusterrole.yaml
- configmap.yaml
- datasources.yaml
- deployment.yaml
- ingress.yaml
- podsecuritypolicy.yaml
- rolebinding.yaml
- role.yaml
- serviceaccount.yaml
- service.yaml

View File

@ -1,51 +0,0 @@
---
# Source: loki-stack/charts/grafana/templates/podsecuritypolicy.yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: loki-grafana
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
# Default set from Docker, with DAC_OVERRIDE and CHOWN
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'csi'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
readOnlyRootFilesystem: false

View File

@ -1,18 +0,0 @@
---
# Source: loki-stack/charts/grafana/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: loki-grafana
namespace: monitoring
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [loki-grafana]

View File

@ -1,21 +0,0 @@
---
# Source: loki-stack/charts/grafana/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: loki-grafana
namespace: monitoring
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: loki-grafana
subjects:
- kind: ServiceAccount
name: loki-grafana
namespace: monitoring

View File

@ -1,21 +0,0 @@
---
# Source: loki-stack/charts/grafana/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: loki-grafana
namespace: monitoring
labels:
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
spec:
ports:
- name: service
port: 80
protocol: TCP
targetPort: grafana
selector:
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki

View File

@ -1,13 +0,0 @@
---
# Source: loki-stack/charts/grafana/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: grafana-6.24.1
app.kubernetes.io/name: grafana
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "8.3.5"
app.kubernetes.io/managed-by: Helm
name: loki-grafana
namespace: monitoring

View File

@ -1,9 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- ./promtail
- ./loki
- ./prometheus
- ./grafana
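
This kustomization ties the namespace, promtail, loki, prometheus and grafana folders together, so the whole stack could be rendered or applied in one step; a minimal sketch, assuming the directory layout implied by the resource list (the monitoring/ path is an assumption):

# Render the full monitoring stack without applying it:
kubectl kustomize monitoring/ | less
# Or apply it directly:
kubectl apply -k monitoring/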

View File

@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- rolebinding.yaml
- role.yaml
- secret.yaml
- serviceaccount.yaml
- service-headless.yaml
- service-memberlist.yaml
- service.yaml
- statefulset.yaml

View File

@ -1,17 +0,0 @@
---
# Source: loki-stack/charts/loki/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: loki
namespace: monitoring
labels:
app: loki
chart: loki-2.16.0
release: loki
heritage: Helm
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [loki]

View File

@ -1,19 +0,0 @@
---
# Source: loki-stack/charts/loki/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: loki
namespace: monitoring
labels:
app: loki
chart: loki-2.16.0
release: loki
heritage: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: loki
subjects:
- kind: ServiceAccount
name: loki

View File

@ -1,23 +0,0 @@
---
# Source: loki-stack/charts/loki/templates/service-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: loki-headless
namespace: monitoring
labels:
app: loki
chart: loki-2.16.0
release: loki
heritage: Helm
variant: headless
spec:
clusterIP: None
ports:
- port: 3100
protocol: TCP
name: http-metrics
targetPort: http-metrics
selector:
app: loki
release: loki

View File

@ -1,24 +0,0 @@
---
# Source: loki-stack/charts/loki/templates/service-memberlist.yaml
apiVersion: v1
kind: Service
metadata:
name: loki-memberlist
namespace: monitoring
labels:
app: loki
chart: loki-2.16.0
release: loki
heritage: Helm
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: http
port: 7946
targetPort: memberlist-port
protocol: TCP
selector:
app: loki
release: loki

View File

@ -1,24 +0,0 @@
---
# Source: loki-stack/charts/loki/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: loki
namespace: monitoring
labels:
app: loki
chart: loki-2.16.0
release: loki
heritage: Helm
annotations:
{}
spec:
type: ClusterIP
ports:
- port: 3100
protocol: TCP
name: http-metrics
targetPort: http-metrics
selector:
app: loki
release: loki

View File

@ -1,15 +0,0 @@
---
# Source: loki-stack/charts/loki/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: loki
chart: loki-2.16.0
release: loki
heritage: Helm
annotations:
{}
name: loki
namespace: monitoring
automountServiceAccountToken: true

View File

@ -1,97 +0,0 @@
---
# Source: loki-stack/charts/loki/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: loki
namespace: monitoring
labels:
app: loki
chart: loki-2.16.0
release: loki
heritage: Helm
annotations:
{}
spec:
podManagementPolicy: OrderedReady
replicas: 1
selector:
matchLabels:
app: loki
release: loki
serviceName: loki-headless
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app: loki
name: loki
release: loki
annotations:
checksum/config: 70f817aa5a2dd5f771aca66233ce0b140c925212f36795fdeb95102ca96db046
prometheus.io/port: http-metrics
prometheus.io/scrape: "true"
spec:
serviceAccountName: loki
securityContext:
fsGroup: 10001
runAsGroup: 10001
runAsNonRoot: true
runAsUser: 10001
initContainers:
[]
containers:
- name: loki
image: "grafana/loki:2.6.1"
imagePullPolicy: IfNotPresent
args:
- "-config.file=/etc/loki/loki.yaml"
volumeMounts:
- name: tmp
mountPath: /tmp
- name: config
mountPath: /etc/loki
- name: storage
mountPath: "/data"
subPath:
ports:
- name: http-metrics
containerPort: 3100
protocol: TCP
- name: grpc
containerPort: 9095
protocol: TCP
- name: memberlist-port
containerPort: 7946
protocol: TCP
livenessProbe:
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 45
readinessProbe:
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 45
resources:
{}
securityContext:
readOnlyRootFilesystem: true
env:
nodeSelector:
{}
affinity:
{}
tolerations:
[]
terminationGracePeriodSeconds: 4800
volumes:
- name: tmp
emptyDir: {}
- name: config
secret:
secretName: loki
- name: storage
emptyDir: {}

View File

@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: monitoring

File diff suppressed because it is too large

View File

@ -1,24 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus
rules:
- apiGroups: [""]
resources:
- nodes
- nodes/metrics
- services
- endpoints
- pods
verbs: ["get", "list","watch"]
- apiGroups: [""]
resources:
- configmaps
verbs: ["get"]
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]

View File

@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus
subjects:
- kind: ServiceAccount
name: prometheus
namespace: monitoring

View File

@ -1,11 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: monitoring
resources:
- bundle.yaml
- serviceaccount.yaml
- clusterrole.yaml
- clusterrolebinding.yaml
- prometheus.yaml

View File

@ -1,14 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
name: prometheus
spec:
serviceAccountName: prometheus
serviceMonitorNamespaceSelector: {}
serviceMonitorSelector:
matchLabels:
team: core
resources:
requests:
memory: 400Mi
enableAdminAPI: false

View File

@ -1,4 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus

View File

@ -1,25 +0,0 @@
---
# Source: loki-stack/charts/promtail/templates/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: loki-promtail
labels:
helm.sh/chart: promtail-6.3.0
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.6.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- nodes
- nodes/proxy
- services
- endpoints
- pods
verbs:
- get
- watch
- list

View File

@ -1,20 +0,0 @@
---
# Source: loki-stack/charts/promtail/templates/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: loki-promtail
labels:
helm.sh/chart: promtail-6.3.0
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.6.1"
app.kubernetes.io/managed-by: Helm
subjects:
- kind: ServiceAccount
name: loki-promtail
namespace: monitoring
roleRef:
kind: ClusterRole
name: loki-promtail
apiGroup: rbac.authorization.k8s.io

View File

@ -1,102 +0,0 @@
---
# Source: loki-stack/charts/promtail/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: loki-promtail
namespace: monitoring
labels:
helm.sh/chart: promtail-6.3.0
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.6.1"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: loki
updateStrategy:
{}
template:
metadata:
labels:
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: loki
annotations:
checksum/config: 807310f261dd2585fdcb196f53c15ad3295af56ceac4869de7beaa331ecc9a3c
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: type
operator: NotIn
values:
- "outbound"
serviceAccountName: loki-promtail
securityContext:
runAsGroup: 0
runAsUser: 0
containers:
- name: promtail
image: "docker.io/grafana/promtail:2.6.1"
imagePullPolicy: IfNotPresent
args:
- "-config.file=/etc/promtail/promtail.yaml"
volumeMounts:
- name: config
mountPath: /etc/promtail
- mountPath: /run/promtail
name: run
- mountPath: /var/lib/docker/containers
name: containers
readOnly: true
- mountPath: /var/log/pods
name: pods
readOnly: true
env:
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
ports:
- name: http-metrics
containerPort: 3101
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
readinessProbe:
failureThreshold: 5
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
volumes:
- name: config
secret:
secretName: loki-promtail
- hostPath:
path: /run/promtail
name: run
- hostPath:
path: /var/lib/docker/containers
name: containers
- hostPath:
path: /var/log/pods
name: pods

View File

@ -1,9 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- clusterrolebinding.yaml
- clusterrole.yaml
- daemonset.yaml
- secret.yaml
- serviceaccount.yaml

View File

@ -1,13 +0,0 @@
---
# Source: loki-stack/charts/promtail/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: loki-promtail
namespace: monitoring
labels:
helm.sh/chart: promtail-6.3.0
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.6.1"
app.kubernetes.io/managed-by: Helm

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: synapse-backup-pvc
namespace: synapse
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: flat-storage-class

View File

@ -1,62 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: synapse-config
namespace: synapse
data:
homeserver.yaml: |
enable_metrics: true
report_stats: false
server_name: "matrix.beta.halia.dev"
pid_file: "/data/homeserver.pid"
media_store_path: /data/media_store
trusted_key_servers:
- server_name: "matrix.org"
listeners:
- port: 8008
tls: false
type: http
x_forwarded: true
resources:
- names: [client, federation]
compress: false
- port: 9009
tls: false
type: metrics
bind_addresses: ["0.0.0.0"]
database:
name: psycopg2
args:
user: synapse
password: aberation
host: localhost
port: 5432
cp_min: 5
cp_max: 10
keepalives_idle: 10
keepalives_interval: 10
keepalives_count: 3
enable_registration: false
log_config: /data/matrix.beta.halia.dev.log.config
registration_shared_secret: "REDACTED"
form_secret: "REDACTED"
macaroon_secret_key: "REDACTED"
signing_key_path: /data/matrix.beta.halia.dev.signing.key
matrix.beta.halia.dev.log.config: |
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
handlers:
console:
class: logging.StreamHandler
formatter: precise
loggers:
synapse.storage.SQL:
level: INFO
root:
level: INFO
handlers: [console]
disable_existing_loggers: false

View File

@ -1,25 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: backup-job
namespace: synapse
spec:
schedule: "0 4 * * *" # Every day at 4AM
concurrencyPolicy: Forbid
jobTemplate:
spec:
template:
spec:
containers:
- name: postgres-backup
image: postgres:14-alpine3.15
command: ["sh", "-c", "PGPASSWORD=aberation /usr/local/bin/pg_dumpall -U synapse -h synapse-svc.synapse.svc.cluster.local > /backup/synapse/backup-$(date +'%H_%M-%d_%m_%Y').sql"]
volumeMounts:
- name: synapse-backup
mountPath: /backup/synapse
subPath: synapse
volumes:
- name: synapse-backup
persistentVolumeClaim:
claimName: synapse-backup-pvc
restartPolicy: OnFailure
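
The CronJob only produces dumps; restoring one is the mirror operation. A hedged sketch, assuming a pod that mounts synapse-backup-pvc and the Postgres sidecar of the deployment below (pod and file names are placeholders):

# Copy a dump out of the backup volume (pod name is a placeholder):
kubectl -n synapse cp <pod-mounting-backup-pvc>:/backup/synapse/backup-<date>.sql ./backup.sql
# Feed it back into the Postgres sidecar of the synapse deployment:
kubectl -n synapse exec -i deploy/synapse -c synapse-db -- \
  sh -c 'PGPASSWORD=aberation psql -U synapse -d postgres' < ./backup.sql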

View File

@ -1,57 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: synapse
namespace: synapse
spec:
replicas: 1
selector:
matchLabels:
app: synapse
template:
metadata:
labels:
app: synapse
spec:
nodeName: slave-1
securityContext:
fsGroup: 991
containers:
- name: synapse
image: matrixdotorg/synapse:latest
ports:
- containerPort: 8008
- containerPort: 9009
volumeMounts:
- mountPath: "/data"
name: synapse-data-pv
- mountPath: "/data/homeserver.yaml"
name: synapse-config-volume
subPath: homeserver.yaml
- mountPath: "/data/matrix.beta.halia.dev.log.config"
name: synapse-config-volume
subPath: matrix.beta.halia.dev.log.config
- name: synapse-db
image: postgres:14-alpine3.15
env:
- name: POSTGRES_DB
value: "synapse"
- name: POSTGRES_USER
value: "synapse"
- name: POSTGRES_PASSWORD
value: "aberation"
- name: POSTGRES_INITDB_ARGS
value: "--encoding=UTF8 --locale=C"
volumeMounts:
- mountPath: "/var/lib/postgresql/data"
name: synapse-db-pv
volumes:
- name: synapse-db-pv
hostPath:
path: "/mnt/synapse/db"
- name: synapse-data-pv
hostPath:
path: "/mnt/synapse/data"
- name: synapse-config-volume
configMap:
name: synapse-config

View File

@ -1,23 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: synapse-ingress
namespace: synapse
annotations:
kubernetes.io/ingress.class: "traefik"
spec:
tls:
- secretName: synapse-beta-tls
hosts:
- matrix.beta.halia.dev
rules:
- host: matrix.beta.halia.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: synapse-svc
port:
number: 80

View File

@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: synapse

View File

@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: synapse-svc
namespace: synapse
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8008
- name: metrics
port: 9000
protocol: TCP
targetPort: 9009
- name: db
port: 5432
protocol: TCP
targetPort: 5432
selector:
app: synapse
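
The service maps port 9000 to the dedicated metrics listener (9009) configured in the homeserver.yaml above; a hedged check of that path, assuming port-forward access and Synapse's usual /_synapse/metrics endpoint:

# Forward the metrics port of the service and sample the Prometheus-format output:
kubectl -n synapse port-forward svc/synapse-svc 9000:9000 &
curl -s http://localhost:9000/_synapse/metrics | head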