Compare commits


1 Commits
dev ... master

Author SHA1 Message Date
d27117843e Update README.md 2022-02-27 13:50:43 +00:00
118 changed files with 18 additions and 37823 deletions

View File

@ -1,13 +0,0 @@
deploy:
stage: deploy
image:
name: bitnami/kubectl
entrypoint: ['']
script:
- kubectl config get-contexts
- kubectl config use-context athens-school/k3s-cluster:dev-env
- kubectl get pods
- kubectl apply -f traefik-lb
- kubectl apply -f nginx
only:
- dev

View File

@ -1 +0,0 @@

View File

@ -1,16 +0,0 @@
dev:
kubectl taint nodes outsider type=services:NoSchedule --overwrite
kubectl apply -k environments/dev/bootstrap --all
kubectl apply -k environments/dev --prune=true --all
prod:
kubectl taint node -l type=outbound type=services:NoSchedule --overwrite
kubectl apply -k environments/prod/bootstrap --all
kubectl apply -k environments/prod --all
monitor:
kubectl apply -f nginx/external/servicemonitor.yaml
kubectl apply -f nginx/internal/servicemonitor.yaml
kubectl apply -f argo/servicemonitor.yaml
kubectl apply -f cloudnativepg/podmonitor.yaml

188
README.md
View File

@ -1,172 +1,20 @@
# K3s cluster
## CRDs
| Name | Description | Operator | Prometheus integration |
| ------------------------------------------------------------------------ | ----------------------------- | -------- | ---------------------- |
| [Nginx](https://docs.nginx.com/nginx-ingress-controller/) | Kubernetes Ingress Controller | No | Configured |
| [Prometheus](https://github.com/prometheus-operator/prometheus-operator) | Metrics scraping | Yes | Configured |
| [ArgoCD](https://argo-cd.readthedocs.io/en/stable/) | Declarative GitOps CD | No | Configured |
| [Longhorn](https://longhorn.io/) | Distributed block storage | No | Configured |
| [MetalLB](https://metallb.universe.tf/) | Bare metal load-balancer | No | Not configured |
| [CloudNativePG](https://cloudnative-pg.io/) | PostgreSQL operator | Yes | Configured |
| [SOPS](https://github.com/isindir/sops-secrets-operator) | Secret management | Yes | Not configured |
## Services
| Name | Usage | Accessibility | Host | DB type | Additional data | Backup configuration | Loki integration | Prometheus integration | Secret management | Status | Standalone migration |
| ----------------------- | ------------------------------------ | ---------------- | ---------- | ---------- | -------------------- | ---------------------- | ---------------- | ---------------------- | ---------------------- | ----------------------------- | --------------------- |
| Nginx | Reverse proxy and load balancer | Public & Private | [Ingresses]| - | - | - | Configured | Configured | - | Completed<sup>5</sup> | Backbone |
| ArgoCD | Declarative GitOps CD | Private | [Workers] | - | - | - | Configured | Configured | - | Completed | Backbone |
| Vaultwarden | Password manager | Public | [Workers] | PostgreSQL | - | - | Configured | Not available | Configured | Completed | Completed |
| Gitea | Version control system | Public | [Workers] | PostgreSQL | User created content | Configured<sup>9</sup> | Configured | Configured | Configured | Completed<sup>4</sup> | Completed |
| Synapse | Matrix server - Message centralizer | Public | [Workers] | PostgreSQL | User files | Configured<sup>9</sup> | Configured | Configured | Configured | Completed | Completed |
| Grafana | Graph visualizer | Private | [Workers] | - | - | - | Configured | Configured | Configured | Completed | Completed<sup>8</sup> |
| Prometheus | Metrics aggregator | Private | [Workers] | - | - | Configured<sup>9</sup> | Configured | Configured | - | Completed | Completed<sup>8</sup> |
| Loki | Log aggregator | Private | [Workers] | - | - | Configured<sup>9</sup> | Configured | Configured | - | Completed | Completed<sup>8</sup> |
| Adguard | DNS ad blocker and custom DNS server | Private | [Egress] | - | - | - | Configured | Configured | Configured | Completed | Completed |
| Home assistant | Home automation and monitoring | Private | [Workers] | PostgreSQL | Additional data | Configured<sup>9</sup> | Configured | Configured | Configured | Completed | Completed |
| Owncloud Infinite Scale | File hosting webUI | Public | [Workers] | ? | Drive files | Not configured | Configured | Not configured | Configured | Pending configuration | Awaiting |
| therbron.com | Personal website | Public | [Workers] | - | - | - | Not configured | Not configured | - | Awaiting configuration | Awaiting |
| Radarr | Movie collection manager | Private | [Workers] | PostgreSQL | - | - | Configured | Not configured | Not configured | Partial | Awaiting |
| Flaresolverr | Cloudflare proxy | Private | [Workers] | - | - | - | - | - | - | Completed | Awaiting |
| Sonarr | TV shows collection manager | Private | [Workers] | SQLite | - | Not configured | Configured | Not configured | Not configured | Partial | Awaiting |
| Prowlarr | Torrent indexer | Private | [Workers] | PostgreSQL | - | Not configured | Configured | Not available | Not configured | Partial | Awaiting |
| Jellyfin | Media streaming | Public | Archimedes | SQLite\*\* | - | - | Configured | Not configured | Configured<sup>6</sup> | Completed | Awaiting |
| Jellyseerr | Media requesting WebUI | Public | [Workers] | - | - | - | Not configured | Not available | Configured<sup>7</sup> | Awaiting configuration | Awaiting |
| Minecraft | Vanilla minecraft server for friends | Public | Archimedes | - | Game map | Not configured | Not configured | Not configured | - | Awaiting configuration | Awaiting |
| Satisfactory | Satisfactory server for friends | Public | Archimedes | - | Game map | Not configured | Not configured | Not configured | - | Not needed for v1 | Awaiting |
| Space engineers | Space engineers server for friends | Public | Archimedes | - | Game map | Not configured | Not configured | Not configured | - | Not needed for v1 | Awaiting |
| Raspsnir | Bachelor memorial website | Public | [Workers] | PostgreSQL | - | Not configured | Not configured | Not configured | - | Not needed for v1 | Awaiting |
| Vikunja | To-do and Kanban boards | Public | [Workers] | - | - | - | Not configured | Not configured | - | Migrate to Gitea | Awaiting |
| Wiki | Documentation manager | Public | [Workers] | - | - | - | Not configured | Not configured | - | Migrate to VuePress and Gitea | Awaiting |
| PaperlessNG | PDF viewer and organiser | Public | [Workers] | PostgreSQL | - | - | Not configured | Not configured | - | Research migration into OCIS | Awaiting |
\* Configuration panel only available internally<br>
\*\* Current implementation only supports SQLite, making manual backups a necessity<br>
<sup>4</sup> Configuration completed, awaiting data migration from Gitlab<br>
<sup>5</sup> Missing dashboard configuration<br>
<sup>6</sup> Done through volume backups, as it is not possible otherwise<br>
<sup>7</sup> Done, but needs a reimplementation using kustomize for secret separation from configmap<br>
<sup>8</sup> Done but included in a grouped project `Monitoring`<br>
<sup>9</sup> Handled by Longhorn<br>
## Backup management
### Databases
// To complete
### Additional data
All additional data that needs to be backed up is mounted on a Longhorn volume, so it also benefits from scheduled backups.
Example:
```
longhorn
└───backups
    ├───vaultwarden
    │   └───<backup_date>.sql
    │       ...
    └───gitlab
        └───<backup_date>.sql
            ...
```
## TODO
- ~~Add AntiAffinities to `outsider` nodes~~
- ~~Migrate Homeassistant to PostgreSQL instead of MariaDB~~
- ~~Move Prometheus connection management to ServiceMonitors instead of ConfigMap~~
- ~~Configure Alertmanager with basic webhook (discord)~~
- ~~Configure Prometheus alerts~~
- ~~Schedule longhorn S3 backups~~
- ~~Schedule CloudNativePG S3 backups~~
- ~~Restrict `metrics` endpoint on public services~~ See Gitea repository for example
- ~~Move from NFS to S3 mounts for NAS volumes~~
- ~~Migrate Vaultwarden to PostgreSQL instead of MariaDB~~
- ~~Deploy PostgreSQL cluster using operator for database HA and easy maintenance~~ - To be tested properly
- Change host/deployment specific variables to use environment variables (using Kustomize)
- ~~Write CI/CD pipeline to create environment loaded files~~ Done with Kustomize migration
- ~~Write CI/CD pipeline to deploy cluster~~ Done with ArgoCD
- ~~Setup internal traefik with nodeport as reverse proxy for internal only services~~ Done through double ingress class and LB
- ~~Setup DB container sidecars for automated backups to Longhorn volume~~
- ~~Setup secrets configuration through CI/CD variable injection (using Kustomize)~~ Environment modified by SOPS implementation
- ~~Figure out SOPS secret injection for absent namespaces~~
- ~~Explore permission issues when issuing OVH API keys (not working for wildcard and `beta.halia.dev` subdomain)~~ Supposedly done
- Setup default users for deployments
- ~~Setup log and metric monitoring~~
- ~~Define namespaces through yaml files~~
- ~~Look into CockroachDB for redundant database~~ Judged too complicated, moving to a 1 to 1 relationship between services and databases
- ~~Configure IP range accessibility through Traefik (Internal vs external services)~~ Impossible because of flannel ip-masq
- ~~Move secrets to separate, private Git repository ?~~ Done with SOPS
- ~~Configure NFS connection for media library~~
- ~~Research IPv6 configuration for outsider node~~ Impossible in Denmark while using YouSee as an ISP for now (no IPv6 support)
- ~~Write small script for auto installation of the cluster, to split API calls into 2 stages (solves MetalLB API not found error)~~
- ~~Migrate ingresses to traefik kind instead of k8s kind~~ Migrated to Nginx ingress controller
- Implement Redis operator and document all services using Redis
- ~~Implement Kustomization file and bootstrap loading for cloudnativepg s3 secret~~
## Notes
### Cluster base setup
Set up the cluster's backbone
```
make dev
# Include SOPS master secret generation
kubectl create secret generic age-key --from-file=~/.sops/key.txt -n sops
```
NOTE: It might be required to update the MetalLB IP range as well as the Traefik LoadBalancerIPs.
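A minimal sketch of where those values live, using the resource names found in the manifests of this repository (the addresses shown are the current ones and purely illustrative):
```
# MetalLB address pool used by the LoadBalancer services
kubectl -n metallb-system edit ipaddresspool base   # update spec.addresses
# Traefik LoadBalancer IPs (one Service per ingress side)
kubectl -n default patch svc traefik-external --type merge -p '{"spec":{"loadBalancerIP":"10.18.242.163"}}'
kubectl -n default patch svc traefik-internal --type merge -p '{"spec":{"loadBalancerIP":"10.10.0.35"}}'
```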
### Convert helm chart to k3s manifest
`helm template chart stable/chart --output-dir ./chart`
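For example, rendering the Longhorn chart into plain manifests that can be committed to this repository (repository URL and release name are illustrative):
```
helm repo add longhorn https://charts.longhorn.io
helm template longhorn longhorn/longhorn --namespace longhorn-system --output-dir ./longhorn
```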
### Gitlab backup process
Because GitLab does not offer the possibility to back up a container's data from an external container, a cronjob has been implemented in the custom image used for deployment.
NOTE: This does not apply anymore, as a migration to Gitea is planned
### VPN configuration for Deluge
~~Instead of adding an extra networking layer to the whole cluster, it seems like a better idea to just integrate a WireGuard connection inside the Deluge image, and self-build everything within the GitLab registry.
This image could use Kubernetes secrets, including a "torrent-vpn" secret produced by the initial WireGuard configuration done via Ansible.
This Ansible script could create one (or more) additional client(s) depending on the inventory configuration, and keep the "torrent-vpn" configuration file within a k3s-formatted file, inside the auto-applied directory on the control plane node.<br>
Cf: https://docs.k3s.io/advanced#auto-deploying-manifests~~
After further reflection, it doesn't make sense to have Deluge be part of the cluster. It will be moved to the NAS, as it can only run when the NAS is running. This will also ease the whole VPN configuration.
### Longhorn backup configuration
Backup target : `s3://halis@eu-west-1/longhorn/`
Backup target credential secret : `minio-secrets`
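The credential secret referenced above can be created with the S3 key names Longhorn expects (a minimal sketch; the values are placeholders):
```
kubectl -n longhorn-system create secret generic minio-secrets \
  --from-literal=AWS_ACCESS_KEY_ID=<access_key> \
  --from-literal=AWS_SECRET_ACCESS_KEY=<secret_key> \
  --from-literal=AWS_ENDPOINTS=https://<minio_endpoint>
```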
### Development domains
To access a service publicly when developing, the domain name should be `*.beta.halia.dev`.
To only expose a service internally, the domain name should be `*.beta.entos`.
### Ingresses
To split external and internal services, two Traefik ingress classes are implemented through the `ingressclass` annotation.
`traefik-external` only allows external access to a given service, while `traefik-internal` restricts it to internal-only access.
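As an illustration, a service can be pinned to the internal side with the standard ingress-class annotation (a sketch; the namespace and ingress name are placeholders):
```
kubectl -n <namespace> annotate ingress <ingress_name> kubernetes.io/ingress.class=traefik-internal
```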
### Secret management
All secrets are encrypted using SOPS and stored in a private secret repository.
Secrets are decrypted on the fly when applied to the cluster, using the SOPS operator.
Inject the AGE key into the cluster to allow the operator to decrypt secrets:
```
kubectl create secret generic age-key --from-file=<path_to_file> -n sops
```
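Before being pushed to the private repository, a manifest is encrypted against the matching AGE public key, for example (a minimal sketch; the recipient key and file name are placeholders):
```
sops --encrypt --age <age_public_key> --encrypted-regex '^(data|stringData)$' --in-place <secret_manifest>.yaml
```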
### Transfer files
`kubectl cp <pod_name>:<path_to_file> <path_to_file>`
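For example, pulling a database dump out of a pod into the local working directory (pod name, namespace, and paths are illustrative):
```
kubectl cp -n vaultwarden vaultwarden-0:/backups/<backup_date>.sql ./<backup_date>.sql
```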
## Doing
- Migrate Netbird installation to K8s manifests
- Migrate nodes to Netbird network
| Name | Usage | Accessibility | Host | Automated backups |
|--|--|--|--|--|
| therbron.com | Personal website | Public | Socrates |Nothing to backup|
| Nginx Proxy Manager | Reverse proxy management | Public* | Socrates |<center> Replaced by internal Ingress </center>|
| Adguard | DNS ad blocker and custom DNS server | Private | Socrates |<center> ? </center>|
| Nextcloud | File hosting suite | Public | Plato |<center> ? </center>|
| Collabora | Online office suite | Public | Plato |<center> ? </center>|
| Home assistant | Home automation and monitoring | Private | Pythagoras-a |<center> ? </center>|
| Pwndrop | On the fly file downloading | Public* | Pythagoras-b |<center> ? </center>|
| Vikunja | To-do and Kanban boards | Public | Pythagoras-b |<center> ? </center>|
| Gitlab | Version control system | Public | Pythagoras-b |<center> ? </center>|
| Wiki | Documentation manager | Public | Pythagoras-b |<center> ? </center>|
| Bitwarden | Password manager | Public | Pythagoras-b |<center> ? </center>|
| Jellyfin | Media streaming | Public | Archimedes |<center> ? </center>|
| Sonarr | TV shows collection manager | Private | Plato |<center> ? </center>|
| Radarr | Movie collection manager | Private | Plato |<center> ? </center>|
| Jackett | Torrent indexer | Private | Plato |<center> ? </center>|
| Deluge | Torrent client | Private | Plato |<center> ? </center>|

View File

@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: gitea
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://git.halis.io/athens-school/gitea
targetRevision: k3s
path: manifests
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- PruneLast=true
destination:
server: https://kubernetes.default.svc
namespace: gitea

View File

@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: headlamp
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://git.halis.io/athens-school/headlamp
targetRevision: master
path: manifests
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- PruneLast=true
destination:
server: https://kubernetes.default.svc
namespace: headlamp

View File

@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: homeassistant
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://git.halia.dev/athens-school/hassio.git
targetRevision: k3s
path: manifests
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- PruneLast=true
destination:
server: https://kubernetes.default.svc
namespace: home-automation

View File

@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: monitoring
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://git.halis.io/athens-school/monitoring
targetRevision: master
path: manifests
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- PruneLast=true
- Replace=true
destination:
server: https://kubernetes.default.svc
namespace: monitoring

View File

@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: synapse
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://git.halis.io/athens-school/synapse.git
targetRevision: master
path: manifests
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- PruneLast=true
destination:
server: https://kubernetes.default.svc
namespace: synapse

View File

@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: vaultwarden
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://git.halis.io/athens-school/bitwarden.git
targetRevision: k3s
path: manifests
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- PruneLast=true
destination:
server: https://kubernetes.default.svc
namespace: vaultwarden

View File

@ -1,10 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-cmd-params-cm
labels:
app.kubernetes.io/name: argocd-cmd-params-cm
app.kubernetes.io/part-of: argocd
data:
server.insecure: "true"

View File

@ -1,15 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/name: argocd-cm
app.kubernetes.io/part-of: argocd
name: argocd-cm
namespace: argocd
data:
resource.customizations: |
networking.k8s.io/Ingress:
health.lua: |
hs = {}
hs.status = "Healthy"
return hs

View File

@ -1,18 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: argocd-server
namespace: argocd
spec:
ingressClassName: nginx-internal
rules:
- host: argo.entos
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: argocd-server
port:
number: 80

File diff suppressed because it is too large

View File

@ -1,10 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: argocd
resources:
- namespace.yaml
- config.yaml
- ingress-config.yaml
- ingress.yaml
- install.yaml

View File

@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: argocd

View File

@ -1,14 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: argocd
namespace: argocd
labels:
team: core
spec:
selector:
matchLabels:
app.kubernetes.io/name: argocd-metrics
endpoints:
- port: metrics
path: /metrics

File diff suppressed because it is too large

View File

@ -1,15 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-production
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: tanguy.herbron@outlook.com
privateKeySecretRef:
name: letsencrypt-production
solvers:
- selector: {}
http01:
ingress:
class: nginx-external

View File

@ -1,15 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
server: https://acme-staging-v02.api.letsencrypt.org/directory
email: tanguy.herbron@outlook.com
privateKeySecretRef:
name: letsencrypt-staging
solvers:
- selector: {}
http01:
ingress:
class: nginx-external

View File

@ -1,7 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- https://github.com/cert-manager/cert-manager/releases/download/v1.16.2/cert-manager.yaml
- clusterissuer-staging.yaml
- clusterissuer-production.yaml

View File

@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: argocd
resources:
- podmonitor.yaml

View File

@ -1,13 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: cnpg-controller-manager
namespace: cnpg-system
labels:
team: core
spec:
selector:
matchLabels:
app.kubernetes.io/name: cloudnative-pg
podMetricsEndpoints:
- port: metrics

View File

@ -1,288 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 80
targetPort: 9090
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.5.1
ports:
- containerPort: 9090
protocol: TCP
args:
- --namespace=kubernetes-dashboard
- --enable-insecure-login
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
path: /
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.7
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}

View File

@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard

View File

@ -1,28 +0,0 @@
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: stripprefix
annotations:
kubernetes.io/ingress.class: "traefik"
spec:
stripPrefix:
prefixes:
- /dashboard
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: kubernetes-dashboard-ingress
namespace: kubernetes-dashboard
annotations:
"traefik.ingress.kubernetes.io/router.middlewares": default-stripprefix@kubernetescrd
spec:
entryPoints:
- web
routes:
- match: Host(`localhost`)
kind: Rule
services:
- name: kubernetes-dashboard
port: 80

View File

@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard

View File

@ -1,18 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# MetalLB installation and configuration
- github.com/metallb/metallb/config/native?ref=v0.14.3
# Traefik CRD
- https://raw.githubusercontent.com/traefik/traefik/v2.9/docs/content/reference/dynamic-configuration/kubernetes-crd-definition-v1.yml
- https://raw.githubusercontent.com/traefik/traefik/v2.9/docs/content/reference/dynamic-configuration/kubernetes-crd-rbac.yml
# Longhorn CRD
- https://raw.githubusercontent.com/longhorn/longhorn/v1.7.2/deploy/longhorn.yaml
# SOPS secrets operator CRDs
- https://raw.githubusercontent.com/isindir/sops-secrets-operator/master/config/crd/bases/isindir.github.com_sopssecrets.yaml
# Install CloudNativePG operator
- https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.20/releases/cnpg-1.19.1.yaml
patches:
- path: ./metallb-patch.yaml

View File

@ -1,13 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: speaker
namespace: metallb-system
spec:
template:
spec:
tolerations:
- key: "type"
operator: "Equal"
value: "services"
effect: "NoSchedule"

View File

@ -1,18 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# MetalLB configuration
# Miscellaneous basic configuration
# NFS client configuration
# Longhorn installation and configuration
# SOPS operator for secret management on the fly
# Traefik configuration
# Argo installation and configuration
resources:
- ../../metallb
- ../../res
- ../../nfs-provisioner
- ../../longhorn
- ../../sops-operator
- ../../traefik
- ../../argo

View File

@ -1,13 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: speaker
namespace: metallb-system
spec:
template:
spec:
tolerations:
- key: "type"
operator: "Equal"
value: "services"
effect: "NoSchedule"

View File

@ -1,7 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: traefik-external
namespace: default
spec:
loadBalancerIP: 10.18.242.163

View File

@ -1,7 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: traefik-internal
namespace: default
spec:
loadBalancerIP: 10.10.0.35

View File

@ -1,20 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# MetalLB installation and configuration
- github.com/metallb/metallb/config/native?ref=v0.14.3
# Traefik CRD
#- https://raw.githubusercontent.com/traefik/traefik/v3.2/docs/content/reference/dynamic-configuration/kubernetes-crd-definition-v1.yml
#- https://raw.githubusercontent.com/traefik/traefik/v3.2/docs/content/reference/dynamic-configuration/kubernetes-crd-rbac.yml
# Cert manager CRD
- https://github.com/cert-manager/cert-manager/releases/download/v1.16.1/cert-manager.crds.yaml
# Longhorn CRD
- https://raw.githubusercontent.com/longhorn/longhorn/v1.7.2/deploy/longhorn.yaml
# SOPS secrets operator CRDs
- https://raw.githubusercontent.com/isindir/sops-secrets-operator/master/config/crd/bases/isindir.github.com_sopssecrets.yaml
# Install CloudNativePG operator
- https://github.com/cloudnative-pg/cloudnative-pg/raw/refs/heads/main/releases/cnpg-1.24.1.yaml
patches:
- path: ./metallb-patch.yaml

View File

@ -1,13 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: speaker
namespace: metallb-system
spec:
template:
spec:
tolerations:
- key: "type"
operator: "Equal"
value: "services"
effect: "NoSchedule"

View File

@ -1,19 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# MetalLB configuration
# Miscellaneous basic configuration
# NFS client configuration
# Longhorn installation and configuration
# SOPS operator for secret management on the fly
# Traefik configuration
# Argo installation and configuration
resources:
- ../../metallb
- ../../res
- ../../longhorn
- ../../sops-operator
#- ../../traefik
- ../../cert-manager
- ../../argo
#- ../../calico

View File

@ -1,18 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: longhorn-frontend
namespace: longhorn-system
spec:
ingressClassName: nginx-internal
rules:
- host: longhorn.entos
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: longhorn-frontend
port:
number: 80

View File

@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ingress.yaml
- recurrent-backup.yaml
- secrets.yaml
- servicemonitor.yaml

View File

@ -1,15 +0,0 @@
apiVersion: longhorn.io/v1beta1
kind: RecurringJob
metadata:
name: hourly-backup
namespace: longhorn-system
spec:
cron: "0 * * * *"
task: backup
groups:
- standard-pvc
retain: 10
concurrency: 10
labels:
recurrence: hourly
group: standard-pvc

View File

@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: longhorn-s3-secrets
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://git.halis.io/athens-school/k3s-secrets
targetRevision: prod-migration
path: longhorn
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- PruneLast=true
destination:
server: https://kubernetes.default.svc
namespace: longhorn-system

View File

@ -1,13 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: longhorn
namespace: longhorn-system
labels:
team: core
spec:
selector:
matchLabels:
app: longhorn-manager
endpoints:
- port: manager

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
ipaddress-pools:
- name: default
addresses:
- 10.20.0.0/24
- 51.15.80.73/32

View File

@ -1,9 +0,0 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: base
namespace: metallb-system
spec:
addresses:
- 51.15.80.73/32
- 10.10.0.0/24

View File

@ -1,7 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- configmap.yaml
- ipaddresspool.yaml
- l2advertisement.yaml

View File

@ -1,8 +0,0 @@
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
namespace: metallb-system
name: l2advertisement
spec: {}
#ipAddressPools:
# - default

View File

@ -1,35 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: minecraft
spec:
replicas: 1
selector:
matchLabels:
app: minecraft
template:
metadata:
labels:
app: minecraft
spec:
containers:
- name: minecraft
image: itzg/minecraft-server
ports:
- containerPort: 25565
protocol: TCP
env:
- name: EULA
value: "TRUE"
volumeMounts:
- name: minecraft-data
mountPath: /data/world
subPath: world
volumes:
- name: minecraft-data
persistentVolumeClaim:
claimName: minecraft-pvc
nodeSelector:
kubernetes.io/hostname: "archimedes"
securityContext:
fsGroup: 1000

View File

@ -1,13 +0,0 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
name: minecrafttcp
spec:
entryPoints:
- minecrafttcp
routes:
- match: HostSNI(`*`)
services:
- name: minecraft-svc-tcp
port: 25565

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: minecraft-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: flat-storage-class

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: minecraft-svc-tcp
spec:
type: ClusterIP
ports:
- protocol: TCP
port: 25565
selector:
app: minecraft

View File

@ -1,7 +0,0 @@
namespace: nfs-provisioner
bases:
- github.com/kubernetes-sigs/nfs-subdir-external-provisioner/deploy
resources:
- namespace.yaml
patchesStrategicMerge:
- patch_nfs_details.yaml

View File

@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: nfs-provisioner

View File

@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: nfs-client-provisioner
name: nfs-client-provisioner
spec:
template:
spec:
containers:
- name: nfs-client-provisioner
env:
- name: NFS_SERVER
value: 192.168.56.200
- name: NFS_PATH
value: /export/Bulk
volumes:
- name: nfs-client-root
nfs:
server: 192.168.56.200
path: /export/Bulk

View File

@ -1,678 +0,0 @@
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: controller-external
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress
namespace: nginx-ingress
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress-admission
namespace: nginx-ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: controller-external
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress
namespace: nginx-ingress
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resourceNames:
- nginx-external-ingress-leader
resources:
- leases
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress-admission
namespace: nginx-ingress
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress-admission
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: controller-external
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress
namespace: nginx-ingress
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nginx-external-ingress
subjects:
- kind: ServiceAccount
name: nginx-external-ingress
namespace: nginx-ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress-admission
namespace: nginx-ingress
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nginx-external-ingress-admission
subjects:
- kind: ServiceAccount
name: nginx-external-ingress-admission
namespace: nginx-ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nginx-external-ingress
subjects:
- kind: ServiceAccount
name: nginx-external-ingress
namespace: nginx-ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress-admission
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nginx-external-ingress-admission
subjects:
- kind: ServiceAccount
name: nginx-external-ingress-admission
namespace: nginx-ingress
---
apiVersion: v1
data:
allow-snippet-annotations: "true"
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/component: controller-external
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress-controller
namespace: nginx-ingress
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller-external
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress-controller
namespace: nginx-ingress
spec:
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- appProtocol: http
name: http
port: 80
protocol: TCP
targetPort: http
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
- name: prometheus
port: 10254
protocol: TCP
targetPort: prometheus
selector:
app.kubernetes.io/component: controller-external
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
type: NodePort
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller-external
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress-controller-admission
namespace: nginx-ingress
spec:
ports:
- appProtocol: https
name: https-webhook
port: 443
targetPort: webhook
selector:
app.kubernetes.io/component: controller-external
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/component: controller-external
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress-controller
namespace: nginx-ingress
spec:
minReadySeconds: 0
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/component: controller-external
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
labels:
app.kubernetes.io/component: controller-external
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: ingress
operator: In
values:
- external
tolerations:
- key: "type"
operator: "Equal"
value: "services"
effect: "NoSchedule"
containers:
- args:
- /nginx-ingress-controller
- --election-id=nginx-external-ingress-leader
- --controller-class=k8s.io/nginx-external-ingress
- --ingress-class=nginx-external
- --configmap=$(POD_NAMESPACE)/nginx-external-ingress-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
- --enable-metrics=true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
image: registry.k8s.io/ingress-nginx/controller:v1.11.3@sha256:d56f135b6462cfc476447cfe564b83a45e8bb7da2774963b00d12161112270b7
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: controller
ports:
- containerPort: 80
name: http
protocol: TCP
- containerPort: 443
name: https
protocol: TCP
- containerPort: 8443
name: webhook
protocol: TCP
- containerPort: 10254
name: prometheus
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 90Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: false
runAsNonRoot: true
runAsUser: 101
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /usr/local/certificates/
name: webhook-cert
readOnly: true
dnsPolicy: ClusterFirst
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: nginx-external-ingress
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: nginx-external-ingress-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress-admission-create
namespace: nginx-ingress
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress-admission-create
spec:
containers:
- args:
- create
- --host=nginx-external-ingress-controller-admission,nginx-external-ingress-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=nginx-external-ingress-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4@sha256:a9f03b34a3cbfbb26d103a14046ab2c5130a80c3d69d526ff8063d2b37b9fd3f
imagePullPolicy: IfNotPresent
name: create
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
serviceAccountName: nginx-external-ingress-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress-admission-patch
namespace: nginx-ingress
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress-admission-patch
spec:
containers:
- args:
- patch
- --webhook-name=nginx-external-ingress-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=nginx-external-ingress-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4@sha256:a9f03b34a3cbfbb26d103a14046ab2c5130a80c3d69d526ff8063d2b37b9fd3f
imagePullPolicy: IfNotPresent
name: patch
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
serviceAccountName: nginx-external-ingress-admission
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
app.kubernetes.io/component: controller-external
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external
spec:
controller: k8s.io/nginx-external-ingress
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
app.kubernetes.io/part-of: nginx-external-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-external-ingress-admission
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: nginx-external-ingress-controller-admission
namespace: nginx-ingress
path: /networking/v1/ingresses
port: 443
failurePolicy: Fail
matchPolicy: Equivalent
name: validate.nginx.ingress.kubernetes.io
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
sideEffects: None

View File

@ -1,7 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deploy.yaml
- loadbalancer.yaml
- networkpolicy.yaml

View File

@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-external-ingress-controller-loadbalancer
namespace: nginx-ingress
spec:
selector:
app.kubernetes.io/component: controller-external
app.kubernetes.io/instance: nginx-external-ingress
app.kubernetes.io/name: nginx-external-ingress
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
- name: https
port: 443
protocol: TCP
targetPort: 443
type: LoadBalancer
externalTrafficPolicy: Local

View File

@ -1,28 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: nginx-scrapper-blocker
namespace: nginx-ingress
spec:
podSelector: {} # Applies to all pods in the namespace
ingress:
- from:
- ipBlock:
cidr: 0.0.0.0/0
except:
- 57.141.0.0/24 # Facebook crawler
- 85.208.96.0/24 # Semrush crawler
- 185.191.171.0/24 # Random crawler
- 44.192.0.0/10 # AWS crawler
- 3.0.0.0/9 # AWS crawler
- 34.192.0.0/10 # AWS crawler
- 100.24.0.0/13 # AWS crawler
- 216.244.64.0/19 # Random crawler
- 54.224.0.0/11 # Random crawler
ports:
- protocol: TCP
port: 80
- protocol: TCP
port: 443
- protocol: TCP
port: 8443

View File

@ -1,14 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: nginx-external
namespace: nginx-ingress
labels:
app.kubernetes.io/name: nginx-external-ingress
spec:
selector:
matchLabels:
app.kubernetes.io/name: nginx-external-ingress
endpoints:
- port: prometheus
path: /metrics

View File

@ -1,678 +0,0 @@
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: controller-internal
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress
namespace: nginx-ingress
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress-admission
namespace: nginx-ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: controller-internal
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress
namespace: nginx-ingress
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resourceNames:
- nginx-internal-ingress-leader
resources:
- leases
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress-admission
namespace: nginx-ingress
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress-admission
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: controller-internal
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress
namespace: nginx-ingress
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nginx-internal-ingress
subjects:
- kind: ServiceAccount
name: nginx-internal-ingress
namespace: nginx-ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress-admission
namespace: nginx-ingress
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nginx-internal-ingress-admission
subjects:
- kind: ServiceAccount
name: nginx-internal-ingress-admission
namespace: nginx-ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nginx-internal-ingress
subjects:
- kind: ServiceAccount
name: nginx-internal-ingress
namespace: nginx-ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress-admission
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nginx-internal-ingress-admission
subjects:
- kind: ServiceAccount
name: nginx-internal-ingress-admission
namespace: nginx-ingress
---
apiVersion: v1
data:
allow-snippet-annotations: "false"
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/component: controller-internal
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress-controller
namespace: nginx-ingress
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller-internal
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress-controller
namespace: nginx-ingress
spec:
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- appProtocol: http
name: http
port: 80
protocol: TCP
targetPort: http
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
- name: prometheus
port: 10254
protocol: TCP
targetPort: prometheus
selector:
app.kubernetes.io/component: controller-internal
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
type: NodePort
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller-internal
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress-controller-admission
namespace: nginx-ingress
spec:
ports:
- appProtocol: https
name: https-webhook
port: 443
targetPort: webhook
selector:
app.kubernetes.io/component: controller-internal
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/component: controller-internal
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress-controller
namespace: nginx-ingress
spec:
minReadySeconds: 0
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/component: controller-internal
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
labels:
app.kubernetes.io/component: controller-internal
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: ingress
operator: In
values:
- internal
tolerations:
- key: "type"
operator: "Equal"
value: "services"
effect: "NoSchedule"
containers:
- args:
- /nginx-ingress-controller
- --election-id=nginx-internal-ingress-leader
- --controller-class=k8s.io/nginx-internal-ingress
- --ingress-class=nginx-internal
- --configmap=$(POD_NAMESPACE)/nginx-internal-ingress-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
- --enable-metrics=true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
image: registry.k8s.io/ingress-nginx/controller:v1.11.3@sha256:d56f135b6462cfc476447cfe564b83a45e8bb7da2774963b00d12161112270b7
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: controller
ports:
- containerPort: 80
name: http
protocol: TCP
- containerPort: 443
name: https
protocol: TCP
- containerPort: 8443
name: webhook
protocol: TCP
- containerPort: 10254
name: prometheus
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 90Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: false
runAsNonRoot: true
runAsUser: 101
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /usr/local/certificates/
name: webhook-cert
readOnly: true
dnsPolicy: ClusterFirst
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: nginx-internal-ingress
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: nginx-internal-ingress-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress-admission-create
namespace: nginx-ingress
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress-admission-create
spec:
containers:
- args:
- create
- --host=nginx-internal-ingress-controller-admission,nginx-internal-ingress-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=nginx-internal-ingress-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4@sha256:a9f03b34a3cbfbb26d103a14046ab2c5130a80c3d69d526ff8063d2b37b9fd3f
imagePullPolicy: IfNotPresent
name: create
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
serviceAccountName: nginx-internal-ingress-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress-admission-patch
namespace: nginx-ingress
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress-admission-patch
spec:
containers:
- args:
- patch
- --webhook-name=nginx-internal-ingress-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=nginx-internal-ingress-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4@sha256:a9f03b34a3cbfbb26d103a14046ab2c5130a80c3d69d526ff8063d2b37b9fd3f
imagePullPolicy: IfNotPresent
name: patch
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
serviceAccountName: nginx-internal-ingress-admission
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
app.kubernetes.io/component: controller-internal
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal
spec:
controller: k8s.io/nginx-internal-ingress
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
app.kubernetes.io/part-of: nginx-internal-ingress
app.kubernetes.io/version: 1.11.3
name: nginx-internal-ingress-admission
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: nginx-internal-ingress-controller-admission
namespace: nginx-ingress
path: /networking/v1/ingresses
port: 443
failurePolicy: Fail
matchPolicy: Equivalent
name: validate.nginx.ingress.kubernetes.io
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
sideEffects: None

@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deploy.yaml
- loadbalancer.yaml

@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-internal-ingress-controller-loadbalancer
namespace: nginx-ingress
spec:
selector:
app.kubernetes.io/component: controller-internal
app.kubernetes.io/instance: nginx-internal-ingress
app.kubernetes.io/name: nginx-internal-ingress
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
- name: https
port: 443
protocol: TCP
targetPort: 443
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: 10.10.0.16

@ -1,14 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: nginx-internal
namespace: nginx-ingress
labels:
app.kubernetes.io/name: nginx-internal-ingress
spec:
selector:
matchLabels:
app.kubernetes.io/name: nginx-internal-ingress
endpoints:
- port: prometheus
path: /metrics

@ -1,7 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- internal
- external

@ -1,7 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: nginx-ingress
app.kubernetes.io/name: nginx-ingress
name: nginx-ingress

@ -1,200 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: chart-owncloud-ocis
namespace: drive
labels:
app.kubernetes.io/instance: chart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: owncloud-ocis
app.kubernetes.io/version: 2.0.0
helm-revision: "1"
helm.sh/chart: owncloud-ocis-8.1.1
annotations:
rollme: oHHac
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: owncloud-ocis
app.kubernetes.io/instance: chart
template:
metadata:
annotations:
labels:
app.kubernetes.io/name: owncloud-ocis
app.kubernetes.io/instance: chart
spec:
serviceAccountName: default
securityContext:
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 568
runAsUser: 568
supplementalGroups:
- 568
dnsPolicy: ClusterFirst
dnsConfig:
options:
- name: ndots
value: "1"
enableServiceLinks: false
terminationGracePeriodSeconds: 10
initContainers:
- name: prepare
image: tccr.io/truecharts/multi-init:v0.0.1@sha256:4c2caebee117b055f379377fd0fd306f2ee0e6697fd47d9364073e0e9e6f2e02
securityContext:
runAsUser: 0
resources:
limits:
cpu: 4000m
memory: 8Gi
requests:
cpu: 10m
memory: 50Mi
env:
command:
- "/bin/sh"
- "-c"
- |
/bin/bash <<'EOF'
echo "Automatically correcting permissions..."
EOF
volumeMounts:
- command:
- /bin/sh
- -c
- |
if test -f /etc/ocis/ocis.yaml; then exit 0; fi &&
/usr/bin/ocis init -f --insecure yes > /etc/ocis/password
image: 'tccr.io/truecharts/ocis:2.0.0@sha256:208f181966b2ef710633e17c27d143e866f40e186c67dd1c3f78748f6f871e82'
name: init
volumeMounts:
- mountPath: /etc/ocis
name: config
nodeName: slave-1
containers:
- name: chart-owncloud-ocis
image: tccr.io/truecharts/ocis:2.0.0@sha256:208f181966b2ef710633e17c27d143e866f40e186c67dd1c3f78748f6f871e82
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
add: []
drop: []
privileged: false
readOnlyRootFilesystem: false
runAsNonRoot: true
env:
- name: UMASK
value: "2"
- name: UMASK_SET
value: "2"
- name: S6_READ_ONLY_ROOT
value: "1"
- name: NVIDIA_VISIBLE_DEVICES
value: "void"
- name: TZ
value: "Europe/Copenhagen"
- name: "ACCOUNTS_DEMO_USERS_AND_GROUPS"
value: "false"
- name: "OCIS_INSECURE"
value: "true"
- name: "OCIS_JWT_SECRET"
valueFrom:
secretKeyRef:
key: OCIS_JWT_SECRET
name: ocis-secrets
- name: "OCIS_LOG_COLOR"
value: "true"
- name: "OCIS_LOG_PRETTY"
value: "true"
- name: "OCIS_MACHINE_AUTH_API_KEY"
valueFrom:
secretKeyRef:
key: OCIS_MACHINE_AUTH_API_KEY
name: ocis-secrets
- name: "OCIS_URL"
value: "https://drive.beta.halia.dev"
- name: "PROXY_HTTP_ADDR"
value: "0.0.0.0:9200"
- name: "PROXY_TLS"
value: "false"
- name: "STORAGE_TRANSFER_SECRET"
valueFrom:
secretKeyRef:
key: STORAGE_TRANSFER_SECRET
name: ocis-secrets
envFrom:
ports:
- name: main
containerPort: 9200
protocol: TCP
volumeMounts:
- mountPath: /etc/ocis
name: config
- mountPath: /var/lib/ocis
name: data
- mountPath: /shared
name: shared
- mountPath: /tmp
name: temp
- mountPath: /var/logs
name: varlogs
livenessProbe:
tcpSocket:
port: 9200
initialDelaySeconds: 10
failureThreshold: 5
timeoutSeconds: 5
periodSeconds: 10
readinessProbe:
tcpSocket:
port: 9200
initialDelaySeconds: 10
failureThreshold: 5
timeoutSeconds: 5
periodSeconds: 10
startupProbe:
tcpSocket:
port: 9200
initialDelaySeconds: 10
failureThreshold: 60
timeoutSeconds: 2
periodSeconds: 5
resources:
limits:
cpu: 4000m
memory: 8Gi
requests:
cpu: 10m
memory: 50Mi
volumes:
- name: config
persistentVolumeClaim:
claimName: chart-owncloud-ocis-config
- name: data
persistentVolumeClaim:
claimName: chart-owncloud-ocis-data
- name: shared
emptyDir:
{}
- name: temp
emptyDir:
{}
- name: varlogs
emptyDir:
{}

@ -1,23 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ocis-ingress
namespace: drive
annotations:
kubernetes.io/ingress.class: "traefik"
spec:
tls:
- secretName: ocis-beta-tls
hosts:
- drive.beta.halia.dev
rules:
- host: drive.beta.halia.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: chart-owncloud-ocis
port:
number: 9200

@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: drive

@ -1,19 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: chart-owncloud-ocis-config
namespace: drive
labels:
helm.sh/chart: owncloud-ocis-8.1.1
app.kubernetes.io/name: owncloud-ocis
app.kubernetes.io/instance: chart
helm-revision: "1"
app.kubernetes.io/version: "2.0.0"
app.kubernetes.io/managed-by: Helm
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "200Mi"
storageClassName: flat-storage-class

@ -1,19 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: chart-owncloud-ocis-data
namespace: drive
labels:
helm.sh/chart: owncloud-ocis-8.1.1
app.kubernetes.io/name: owncloud-ocis
app.kubernetes.io/instance: chart
helm-revision: "1"
app.kubernetes.io/version: "2.0.0"
app.kubernetes.io/managed-by: Helm
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: flat-storage-class

@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: chart-owncloud-ocis
namespace: drive
labels:
app.kubernetes.io/instance: chart
app.kubernetes.io/name: owncloud-ocis
spec:
ports:
- port: 9200
targetPort: 9200
protocol: TCP
name: http
selector:
app.kubernetes.io/name: owncloud-ocis
app.kubernetes.io/instance: chart

@ -1,9 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: flat-storage-class
provisioner: driver.longhorn.io
allowVolumeExpansion: true
parameters:
numberOfReplicas: '1'
staleReplicaTimeout: "2880"

@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- flat-sc.yaml
- redundant-sc.yaml

@ -1,9 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: redundant-storage-class
provisioner: driver.longhorn.io
allowVolumeExpansion: true
parameters:
numberOfReplicas: '3'
staleReplicaTimeout: "2880"

@ -1,75 +0,0 @@
---
# Source: sops-secrets-operator/templates/cluster_role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: sops-sops-secrets-operator
namespace: sops
labels:
app.kubernetes.io/name: sops-secrets-operator
helm.sh/chart: sops-secrets-operator-0.14.1
app.kubernetes.io/instance: sops
app.kubernetes.io/version: "0.8.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- '*'
- apiGroups:
- ""
resources:
- configmaps
- secrets
verbs:
- '*'
- apiGroups:
- ""
resources:
- secrets/status
verbs:
- get
- patch
- update
- apiGroups:
- events.k8s.io
- ""
resources:
- events
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- get
- create
- apiGroups:
- isindir.github.com
resources:
- sopssecrets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- isindir.github.com
resources:
- sopssecrets/finalizers
verbs:
- update
- apiGroups:
- isindir.github.com
resources:
- sopssecrets/status
verbs:
- get
- patch
- update

@ -1,21 +0,0 @@
---
# Source: sops-secrets-operator/templates/cluster_role_binding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: sops-sops-secrets-operator
namespace: sops
labels:
app.kubernetes.io/name: sops-secrets-operator
helm.sh/chart: sops-secrets-operator-0.14.1
app.kubernetes.io/instance: sops
app.kubernetes.io/version: "0.8.1"
app.kubernetes.io/managed-by: Helm
subjects:
- kind: ServiceAccount
name: sops-sops-secrets-operator
namespace: sops
roleRef:
kind: ClusterRole
name: sops-sops-secrets-operator
apiGroup: rbac.authorization.k8s.io

@ -1,9 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- cluster_role_binding.yaml
- cluster_role.yaml
- operator.yaml
- service_account.yaml

@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: sops

@ -1,74 +0,0 @@
---
# Source: sops-secrets-operator/templates/operator.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sops-sops-secrets-operator
namespace: sops
labels:
app.kubernetes.io/name: sops-secrets-operator
helm.sh/chart: sops-secrets-operator-0.14.1
app.kubernetes.io/instance: sops
app.kubernetes.io/version: "0.8.1"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: sops-secrets-operator
app.kubernetes.io/instance: sops
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: sops-secrets-operator
labels:
control-plane: controller-sops-secrets-operator
app.kubernetes.io/name: sops-secrets-operator
app.kubernetes.io/instance: sops
spec:
serviceAccountName: sops-sops-secrets-operator
containers:
- name: sops-secrets-operator
image: "isindir/sops-secrets-operator:0.8.1"
imagePullPolicy: Always
volumeMounts:
- name: age-key
mountPath: /sops
readOnly: true
command:
- /usr/local/bin/manager
args:
# The address the metric endpoint binds to. (default ":8080")
#- "--metrics-bind-address=127.0.0.1:8080"
- "--health-probe-bind-address=:8081"
# Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.
- "--leader-elect"
- "--requeue-decrypt-after=5"
- "--zap-encoder=json"
- "--zap-log-level=info"
- "--zap-stacktrace-level=error"
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: SOPS_AGE_KEY_FILE
value: "/sops/key.txt"
resources:
{}
volumes:
- name: age-key
secret:
secretName: age-key

@ -1,13 +0,0 @@
---
# Source: sops-secrets-operator/templates/service_account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: sops-sops-secrets-operator
namespace: sops
labels:
app.kubernetes.io/name: sops-secrets-operator
helm.sh/chart: sops-secrets-operator-0.14.1
app.kubernetes.io/instance: sops
app.kubernetes.io/version: "0.8.1"
app.kubernetes.io/managed-by: Helm

@ -1,43 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellyfin
namespace: streaming
spec:
replicas: 1
selector:
matchLabels:
app: jellyfin
template:
metadata:
labels:
app: jellyfin
spec:
hostname: jellyfin
subdomain: jellyfin
containers:
- name: jellyfin
image: jellyfin/jellyfin:10.8.9
ports:
- containerPort: 8096
env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: TZ
value: "Europe/Copenhagen"
volumeMounts:
- mountPath: "/config"
name: jellyfin-pvc
- mountPath: "/shows"
name: jellyfin-shows
volumes:
- name: jellyfin-pvc
persistentVolumeClaim:
claimName: jellyfin-pvc
- name: jellyfin-shows
persistentVolumeClaim:
claimName: jellyfin-shows
nodeSelector:
type: "wide"

@ -1,31 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jellyfin-ingress
namespace: streaming
annotations:
kubernetes.io/ingress.class: "traefik"
ingress.kubernetes.io/rewrite-target: /
spec:
tls:
- secretName: jellyfin-beta-tls
hosts:
- stream.beta.halia.dev
rules:
- host: stream.beta.halia.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jellyfin-svc
port:
number: 80
- path: /metrics
pathType: Prefix
backend:
service:
name: jellyfin-svc
port:
number: 80

@ -1,9 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- pvc.yaml
- service.yaml
- ingress.yaml
- configmap.yaml
- deployment.yaml

@ -1,9 +0,0 @@
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: metrics-blocker
namespace: streaming
spec:
replacePathRegex:
regex: ^/metrics
replacement: /

@ -1,49 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: jellyfin-pvc
namespace: streaming
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: flat-storage-class
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-shows
namespace: streaming
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy:
mountOptions:
- hard
- nfsvers=4.1
nfs:
path: /mnt/Alpha/Beta
server: 10.10.0.18
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: jellyfin-shows
namespace: streaming
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
storageClassName: ""
volumeName: nfs-shows
volumeMode: Filesystem

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: jellyfin-svc
namespace: streaming
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8096
selector:
app: jellyfin

@ -1,16 +0,0 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: traefik-ingress
namespace: streaming
spec:
entryPoints:
- websecure
routes:
- match: Host(`stream.beta.halia.dev`) && PathPrefix(`/`)
middlewares:
- name: metrics-blocker
kind: Rule
services:
- name: jellyfin-svc
port: 80

@ -1,44 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellyseerr
namespace: streaming
spec:
replicas: 1
selector:
matchLabels:
app: jellyseerr
template:
metadata:
labels:
app: jellyseerr
spec:
hostname: jellyseerr
subdomain: jellyseerr
containers:
- name: jellyseerr
image: fallenbagel/jellyseerr:latest
lifecycle:
postStart:
exec:
command:
['/bin/sh', '-c',
'mkdir -p /app/config &&
cp /app/config/settings.template.json /app/config/settings.json
']
ports:
- containerPort: 5055
env:
- name: TZ
value: "Europe/Copenhagen"
- name: LOG_LEVEL
value: "debug"
volumeMounts:
- name: jellyseer-config-volume
mountPath: /app/config/settings.template.json
subPath: settings.template.json
volumes:
- name: jellyseer-config-volume
configMap:
defaultMode: 420
name: jellyseer-config

@ -1,23 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jellyseerr-ingress
namespace: streaming
annotations:
kubernetes.io/ingress.class: "traefik"
spec:
tls:
- secretName: jellyseerr-beta-tls
hosts:
- request.beta.halia.dev
rules:
- host: request.beta.halia.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jellyseerr-svc
port:
number: 80

@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- service.yaml
- ingress.yaml
- configmap.yaml
- deployment.yaml

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: jellyseerr-svc
namespace: streaming
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 5055
selector:
app: jellyseerr

@ -1,7 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- ./jellyfin
- ./jellyseerr

@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: streaming

@ -1,20 +0,0 @@
---
# Source: traefik/templates/dashboard-hook-ingressroute.yaml
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: traefik-dashboard
annotations:
helm.sh/hook: "post-install,post-upgrade"
labels:
app.kubernetes.io/name: traefik
app.kubernetes.io/instance: traefik
spec:
entryPoints:
- traefik
routes:
- match: PathPrefix(`/dashboard`) || PathPrefix(`/api`)
kind: Rule
services:
- name: api@internal
kind: TraefikService

@ -1,22 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: traefik-dashboard-ingress
annotations:
kubernetes.io/ingress.class: "traefik-inter"
spec:
tls:
- secretName: traefik-dashboard-beta-tls
hosts:
- traefik.beta.entos
rules:
- host: traefik.beta.entos
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: traefik-dashboard-svc
port:
number: 80

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: traefik-dashboard-svc
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app.kubernetes.io/name: traefik-inter
app.kubernetes.io/instance: treafik-inter

@ -1,16 +0,0 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: traefik-dashboard
namespace: traefik
annotations:
kubernetes.io/ingress.class: "traefik-inter"
spec:
entryPoints:
- websecure
routes:
- kind: Rule
match: Host(`traefik.beta.entos`)
services:
- name: api@internal
kind: TraefikService

@ -1,230 +0,0 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "${flannel_cidr}",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-amd64
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- amd64
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.12.0-amd64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.12.0-amd64
command:
- /opt/bin/flanneld
args:
- --ip-masq=false
- --kube-subnet-mgr
- --iface=${interface}
resources:
requests:
cpu: "50m"
memory: "50Mi"
limits:
cpu: "50m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---

@ -1,9 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: traefik
resources:
- namespace.yaml
- rbac
- traefik-internal
- traefik-external

@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: traefik

@ -1,60 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik
labels:
app.kubernetes.io/name: traefik
app.kubernetes.io/instance: traefik
rules:
- apiGroups:
- ""
resources:
- services
- secrets
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- traefik.io
resources:
- middlewares
- middlewaretcps
- ingressroutes
- traefikservices
- ingressroutetcps
- ingressrouteudps
- tlsoptions
- tlsstores
- serverstransports
- serverstransporttcps
verbs:
- get
- list
- watch

@ -1,16 +0,0 @@
---
# Source: traefik/templates/rbac/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik
labels:
app.kubernetes.io/name: traefik
app.kubernetes.io/instance: traefik
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik
subjects:
- kind: ServiceAccount
name: traefik

Some files were not shown because too many files have changed in this diff Show More