new cluster deployment

This commit is contained in:
auricom
2022-10-26 13:41:02 +02:00
parent 0af3d7cc1f
commit 67aafea749
55 changed files with 328 additions and 227 deletions

.gitignore

@@ -1,2 +1,4 @@
### SOPS ###
.decrypted~*.yaml
### KUBECONFIG ###
provision


@@ -23,23 +23,16 @@
## Bootstrap Flux
```bash
flux bootstrap github \
--version=latest \
--owner=auricom \
--repository=home-ops \
--path=cluster/base \
--personal \
--network-policy=false
kubectl apply -k cluster/bootstrap/
```
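The `cluster/bootstrap/` kustomization applied above is added elsewhere in this commit; for reference, a sketch of it is shown below (the path `cluster/bootstrap/kustomization.yaml` is an assumption, but the contents mirror the new 16-line Kustomization in this commit that pulls the pinned Flux install manifests and drops the bundled `all` NetworkPolicy):
```yaml
# Sketch of the bootstrap kustomization referenced by the command above.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - github.com/fluxcd/flux2/manifests/install?ref=v0.36.0
patches:
  - target:
      group: networking.k8s.io
      version: v1
      kind: NetworkPolicy
    patch: |-
      $patch: delete
      apiVersion: networking.k8s.io/v1
      kind: NetworkPolicy
      metadata:
        name: all
```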
## SOPS secret from age key
```bash
age-keygen -o $HOME/sops/age/keys.txt
cat $HOME/sops/age/keys.txt |
cat ~/.config/sops/age/keys.txt |
kubectl create secret generic sops-age \
--namespace=flux-system \
--from-file=$HOME/sops/age/keys.txt=/dev/stdin
--namespace=flux-system \
--from-file=age.agekey=/dev/stdin
```
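For reference, the pipeline above is equivalent to applying a Secret like the following sketch, where `age.agekey` holds the full contents of the piped `keys.txt` (placeholder values shown):
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: sops-age
  namespace: flux-system
stringData:
  # The block below is the verbatim contents of keys.txt (placeholders here).
  age.agekey: |
    # created: <timestamp>
    # public key: age1examplepublickey
    AGE-SECRET-KEY-1EXAMPLEPRIVATEKEY
```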
## Encrypt kubernetes resources with sops binary


@@ -34,7 +34,7 @@ k3s_server_manifests_templates:
# -- /var/lib/rancher/k3s/server/manifests
k3s_server_manifests_urls:
- url: https://docs.projectcalico.org/archive/v3.23/manifests/tigera-operator.yaml
- url: https://docs.projectcalico.org/archive/v3.24/manifests/tigera-operator.yaml
filename: tigera-operator.yaml
# -- /etc/rancher/k3s/registries.yaml


@@ -17,4 +17,3 @@ k3s_agent:
- "max-pods=150"
node-label:
- "upgrade.cattle.io/plan=k3s-agent"
- "node-role.kubernetes.io/worker=true"


@@ -24,7 +24,7 @@ spec:
dependsOn:
- name: glauth
namespace: default
- name: postgres
- name: postgres-cluster
namespace: default
- name: redis
namespace: default


@@ -53,7 +53,7 @@ spec:
maxParallel: 8
destinationPath: s3://postgresql/
endpointURL: https://truenas.${SECRET_DOMAIN}:9000
serverName: postgres
serverName: postgres-v2
s3Credentials:
accessKeyId:
name: postgres-minio
@@ -61,3 +61,20 @@ spec:
secretAccessKey:
name: postgres-minio
key: MINIO_SECRET_KEY
bootstrap:
recovery:
source: postgres
externalClusters:
- name: postgres
barmanObjectStore:
destinationPath: s3://postgresql/
endpointURL: https://truenas.${SECRET_DOMAIN}:9000
s3Credentials:
accessKeyId:
name: postgres-minio
key: MINIO_ACCESS_KEY
secretAccessKey:
name: postgres-minio
key: MINIO_SECRET_KEY
wal:
maxParallel: 8
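For context, a condensed sketch of how these new fields fit together in the CloudNativePG `Cluster` spec: fresh backups are written under the `postgres-v2` server name, while `bootstrap.recovery` seeds the new cluster from the old `postgres` backups in the same bucket via the external cluster entry. Only the fields shown in the hunk come from this commit; the resource name and the omitted fields are assumptions.
```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: postgres                  # assumed; matches the postgres-rw service referenced elsewhere in this commit
  namespace: default
spec:
  # instances, storage, etc. omitted
  backup:
    barmanObjectStore:
      destinationPath: s3://postgresql/
      endpointURL: https://truenas.${SECRET_DOMAIN}:9000
      serverName: postgres-v2     # new backups land under a fresh prefix
      s3Credentials:
        accessKeyId:
          name: postgres-minio
          key: MINIO_ACCESS_KEY
        secretAccessKey:
          name: postgres-minio
          key: MINIO_SECRET_KEY
      wal:
        maxParallel: 8
  bootstrap:
    recovery:
      source: postgres            # restore from the external cluster defined below
  externalClusters:
    - name: postgres              # old backups, stored under serverName "postgres"
      barmanObjectStore:
        destinationPath: s3://postgresql/
        endpointURL: https://truenas.${SECRET_DOMAIN}:9000
        s3Credentials:
          accessKeyId:
            name: postgres-minio
            key: MINIO_ACCESS_KEY
          secretAccessKey:
            name: postgres-minio
            key: MINIO_SECRET_KEY
        wal:
          maxParallel: 8
```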


@@ -22,7 +22,7 @@ spec:
remediation:
retries: 5
dependsOn:
- name: postgres
- name: postgres-cluster
namespace: default
values:
resources:


@@ -23,7 +23,7 @@ spec:
retries: 5
values:
crds:
create: false
create: true
config:
data:
INHERITED_ANNOTATIONS: kyverno.io/ignore


@@ -21,6 +21,9 @@ spec:
upgrade:
remediation:
retries: 5
dependsOn:
- name: postgres-cluster
namespace: default
values:
image:
repository: gitea/gitea


@@ -5,3 +5,5 @@ resources:
- volume.yaml
- helm-release.yaml
- external-backup
patchesStrategicMerge:
- patches/postgres.yaml


@@ -0,0 +1,31 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: gitea
namespace: default
spec:
values:
initContainers:
init-db:
image: ghcr.io/onedr0p/postgres-initdb:14.5
env:
- name: POSTGRES_HOST
value: postgres-rw.default.svc.cluster.local.
- name: POSTGRES_DB
value: gitea
- name: POSTGRES_SUPER_PASS
valueFrom:
secretKeyRef:
name: postgres-superuser
key: password
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: gitea-config
key: dbUser
- name: POSTGRES_PASS
valueFrom:
secretKeyRef:
name: gitea-config
key: dbPassword


@@ -21,13 +21,13 @@
}
```
2. Create the outline user and password
2. Create the gitea user and password
```sh
mc admin user add minio gitea <super-secret-password>
```
3. Create the outline bucket
3. Create the gitea bucket
```sh
mc mb minio/gitea


@@ -24,7 +24,7 @@ spec:
dependsOn:
- name: emqx
namespace: default
- name: postgres
- name: postgres-cluster
namespace: default
values:
image:


@@ -25,10 +25,10 @@ spec:
controller:
resources:
requests:
cpu: 126m
cpu: 200m
memory: 105M
limits:
cpu: 126m
cpu: 300m
memory: 105M
dashboard:
replicaCount: 1


@@ -27,8 +27,6 @@ spec:
worker:
annotations:
configmap.reloader.stakater.com/reload: node-feature-discovery-worker-conf
nodeSelector:
node-role.kubernetes.io/worker: "true"
config:
core:
sources:


@@ -22,7 +22,7 @@ spec:
remediation:
retries: 5
dependsOn:
- name: postgres
- name: postgres-cluster
namespace: default
- name: redis
namespace: default


@@ -22,7 +22,7 @@ spec:
remediation:
retries: 5
dependsOn:
- name: postgres
- name: postgres-cluster
namespace: default
values:
image:


@@ -17,11 +17,10 @@ spec:
interval: 15m
install:
createNamespace: true
crds: CreateReplace
remediation:
retries: 3
values:
installCRDs: false
installCRDs: true
webhook:
enabled: true
extraArgs:


@@ -3,7 +3,7 @@ apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cert-manager-issuers
namespace: cert-manager
namespace: default
spec:
interval: 15m
chart:


@@ -2,8 +2,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- secret.sops.yaml
- helm-release.yaml
- rbac.yaml
- webhook-ovh
- issuers
- certificates
- prometheus-rule.yaml


@@ -1,8 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: cert-manager
labels:
kustomize.toolkit.fluxcd.io/prune: disabled
goldilocks.fairwinds.com/enabled: "true"


@@ -0,0 +1,25 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager:ovh-dns-challenge
namespace: default
rules:
- apiGroups: ["${SECRET_DOMAIN}"]
resources: ["ovh"]
verbs: ["get", "watch", "list", "create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager:ovh-dns-challenge
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cert-manager:ovh-dns-challenge
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager
namespace: default
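This ClusterRole/ClusterRoleBinding pair lets cert-manager create challenge payloads against the OVH webhook solver's extension API group. Below is a hedged sketch of the kind of ACME issuer that relies on it; the issuer name, email variable, and empty webhook config are assumptions, not taken from this commit.
```yaml
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-production          # assumed name
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: "${SECRET_EMAIL}"            # assumed substitution variable
    privateKeySecretRef:
      name: letsencrypt-production
    solvers:
      - dns01:
          webhook:
            groupName: ${SECRET_DOMAIN} # matches apiGroups in the ClusterRole above
            solverName: ovh             # matches the "ovh" resource the role grants
            config: {}                  # OVH API credentials go here; see the webhook chart's docs
```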


@@ -3,4 +3,3 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- monitoring
- profiles


@@ -1,20 +0,0 @@
---
kind: Profile
apiVersion: config.kio.kasten.io/v1alpha1
metadata:
name: k10-disaster-recovery
namespace: kasten-io
spec:
locationSpec:
type: FileStore
fileStore:
claimName: nfs-backups-kubernetes
path: k10-disaster-recovery
credential:
secretType: ""
secret:
apiVersion: ""
kind: ""
name: ""
namespace: ""
type: Location


@@ -1,7 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- volumes.yaml
- k10-disaster-discovery.yaml
- pvc-export.yaml


@@ -1,20 +0,0 @@
---
kind: Profile
apiVersion: config.kio.kasten.io/v1alpha1
metadata:
name: pvc-export
namespace: kasten-io
spec:
locationSpec:
type: FileStore
fileStore:
claimName: nfs-backups-kubernetes
path: pvc-export
credential:
secretType: ""
secret:
apiVersion: ""
kind: ""
name: ""
namespace: ""
type: Location


@@ -1,35 +0,0 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: nfs-backups-kubernetes
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-backups-kubernetes
spec:
storageClassName: nfs-backups-kubernetes
capacity:
storage: 1Mi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
nfs:
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/backups/kubernetes
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backups-kubernetes
namespace: kasten-io
spec:
accessModes:
- ReadWriteMany
storageClassName: nfs-backups-kubernetes
resources:
requests:
storage: 1Mi


@@ -72,10 +72,10 @@ spec:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values: ["home-assistant"]
values: ["homer"]
- key: app.kubernetes.io/instance
operator: In
values: ["home-assistant"]
values: ["homer"]
topologyKey: kubernetes.io/hostname
resources:
requests:


@@ -22,7 +22,7 @@ spec:
remediation:
retries: 5
dependsOn:
- name: postgres
- name: postgres-cluster
namespace: default
values:
image:


@@ -22,7 +22,7 @@ spec:
remediation:
retries: 5
dependsOn:
- name: postgres
- name: postgres-cluster
namespace: default
values:
global:


@@ -22,7 +22,7 @@ spec:
remediation:
retries: 5
dependsOn:
- name: postgres
- name: postgres-cluster
namespace: default
values:
image:


@@ -22,7 +22,7 @@ spec:
remediation:
retries: 5
dependsOn:
- name: postgres
- name: postgres-cluster
namespace: default
values:
image:


@@ -22,7 +22,7 @@ spec:
remediation:
retries: 5
dependsOn:
- name: postgres
- name: postgres-cluster
namespace: default
values:
controller:


@@ -22,7 +22,7 @@ spec:
remediation:
retries: 5
dependsOn:
- name: postgres
- name: postgres-cluster
namespace: default
values:
image:


@@ -22,7 +22,7 @@ spec:
remediation:
retries: 5
dependsOn:
- name: postgres
- name: postgres-cluster
namespace: default
values:
image:


@@ -22,7 +22,7 @@ spec:
remediation:
retries: 5
dependsOn:
- name: postgres
- name: postgres-cluster
namespace: default
values:
controller:


@@ -22,7 +22,7 @@ spec:
remediation:
retries: 5
dependsOn:
- name: postgres
- name: postgres-cluster
namespace: default
- name: redis
namespace: default


@@ -0,0 +1,16 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- github.com/fluxcd/flux2/manifests/install?ref=v0.36.0
patches:
- target:
group: networking.k8s.io
version: v1
kind: NetworkPolicy
patch: |-
$patch: delete
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: all


@@ -25,6 +25,9 @@ spec:
crds: CreateReplace
remediation:
retries: 3
dependsOn:
- name: rook-ceph-cluster
namespace: rook-ceph
values:
eula:
accept: true


@@ -3,4 +3,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- secret.sops.yaml
- helm-release.yaml
- profiles


@@ -0,0 +1,98 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app kasten-k10-profiles
namespace: &namespace kasten-io
spec:
interval: 15m
chart:
spec:
chart: raw
version: v0.3.1
sourceRef:
kind: HelmRepository
name: dysnix-charts
namespace: flux-system
install:
createNamespace: true
remediation:
retries: 5
upgrade:
remediation:
retries: 5
dependsOn:
- name: k10
namespace: *namespace
values:
resources:
- apiVersion: config.kio.kasten.io/v1alpha1
kind: Profile
metadata:
name: k10-disaster-recovery
namespace: *namespace
spec:
locationSpec:
type: FileStore
fileStore:
claimName: nfs-backups-kubernetes
path: k10-disaster-recovery
credential:
secretType: ""
secret:
apiVersion: ""
kind: ""
name: ""
namespace: ""
type: Location
- apiVersion: config.kio.kasten.io/v1alpha1
kind: Profile
metadata:
name: pvc-export
namespace: *namespace
spec:
locationSpec:
type: FileStore
fileStore:
claimName: nfs-backups-kubernetes
path: pvc-export
credential:
secretType: ""
secret:
apiVersion: ""
kind: ""
name: ""
namespace: ""
type: Location
- apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-backups-kubernetes
provisioner: nfs
reclaimPolicy: Retain
- apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-backups-kubernetes
spec:
storageClassName: nfs-backups-kubernetes
capacity:
storage: 1Mi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
nfs:
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/backups/kubernetes
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backups-kubernetes
namespace: *namespace
spec:
accessModes:
- ReadWriteMany
storageClassName: nfs-backups-kubernetes
resources:
requests:
storage: 1Mi


@@ -2,4 +2,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- crds.yaml
- helm-release.yaml


@@ -0,0 +1,29 @@
# yamllint disable
apiVersion: v1
kind: Secret
metadata:
name: k10-dr-secret
namespace: kasten-io
type: Opaque
data:
key: ENC[AES256_GCM,data:IvEWafKCr3S6bwf1plG7FC3jRZM=,iv:KiWWGBKdx06ZDFuuvIhIOc6q15aaspgAt7E9qh9RmKk=,tag:e6GaVyeG+CDzTHD4OS0/4A==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB4TWU5YTlFY3FPQWhnZ2I2
akxnZ2xIRVNFZTdOWmg0dFhxTUNoZEFIM1cwCit5WnduNlQ1MkF2aytCVldMeVlC
Yk5QNWRQRllOT3ZTL3VGcjJNK1VqeUkKLS0tIFMyWHNFd29nc2tMektxclJkK0pT
Ny9OQ0l4ZXMrdW40NmRsbzgvZ0w5V3cKqTGvN5zk2TPgtxoVfwI7Wsz4N+lC9+Kq
DCXTgTU/QXm9dvo4ErPPzeWFqdk4JchExhvSJV2JfM32O+3z+EGhNg==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2022-10-26T13:59:32Z"
mac: ENC[AES256_GCM,data:jnRPyEky1h4b3ZkLhx+rIPkb9aSOUnaAc1YoHniTJjGAvmLZ7cyEwWzcSd+Okz6LPMWYSIwzxoeawhV5aSPnGVbgPZpVxHUyEolDyeqTUi/IJHyF1800Yaq9N2Q4GCma/xyRu7tO7u0rcrWiMQM9jQ5yFEWCKwWxbNKyUipqvuE=,iv:/x0fryns2Ubx+2LStBnducT1i+RmOah9HM/K0EdU7H8=,tag:ot9l5331qzvmlsEdftqNpw==,type:str]
pgp: []
encrypted_regex: ^(data|stringData)$
version: 3.7.3


@@ -4,4 +4,3 @@ resources:
- flux-system
- k10
- rook-ceph
- storageclasses.yaml


@@ -82,12 +82,6 @@ spec:
memory: "512Mi"
limits:
memory: "6Gi"
prepareosd:
requests:
cpu: "250m"
memory: "50Mi"
limits:
memory: "200Mi"
mgr-sidecar:
requests:
cpu: "50m"


@@ -16,7 +16,7 @@ spec:
namespace: flux-system
values:
crds:
enabled: false
enabled: true
pspEnable: false
monitoring:
enabled: true


@@ -1,7 +0,0 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: local
provisioner: Local
reclaimPolicy: Retain


@@ -1,6 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# renovate: datasource=docker image=quay.io/jetstack/cert-manager-controller
- https://github.com/cert-manager/cert-manager/releases/download/v1.10.0/cert-manager.crds.yaml


@@ -1,30 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: GitRepository
metadata:
name: cloudnative-pg-source
namespace: flux-system
spec:
interval: 12h
url: https://github.com/cloudnative-pg/cloudnative-pg.git
ref:
# renovate: datasource=github-releases depName=cloudnative-pg/cloudnative-pg
tag: "v1.17.1"
ignore: |
# exclude all
/*
# include crd directory
!/config/crd/bases
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
name: crds-cloudnative-pg
namespace: flux-system
spec:
interval: 30m
prune: false
wait: true
sourceRef:
kind: GitRepository
name: cloudnative-pg-source


@@ -1,8 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- cert-manager
- cloudnative-pg
- external-snapshotter
- kube-prometheus-stack
- rook-ceph


@@ -1,30 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
name: rook-ceph-source
namespace: flux-system
spec:
interval: 30m
url: https://github.com/rook/rook.git
ref:
# renovate: registryUrl=https://charts.rook.io/release chart=rook-ceph
tag: v1.10.4
ignore: |
# exclude all
/*
# path to crds
!/deploy/examples/crds.yaml
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
name: rook-ceph-crds
namespace: flux-system
spec:
interval: 15m
prune: false
wait: true
sourceRef:
kind: GitRepository
name: rook-ceph-source


@@ -1,4 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- crds.yaml


@@ -5,7 +5,7 @@ metadata:
name: flux-cluster
namespace: flux-system
spec:
interval: 10m
interval: 30m
# https://github.com/k8s-at-home/template-cluster-k3s/issues/324
url: ssh://git@github.com/auricom/home-ops
ref:
@@ -19,7 +19,7 @@ metadata:
name: flux-cluster
namespace: flux-system
spec:
interval: 10m
interval: 30m
path: ./cluster/flux
prune: true
wait: false


@@ -5,7 +5,7 @@ metadata:
name: flux-installation
namespace: flux-system
spec:
interval: 10m
interval: 30m
ref:
# renovate: datasource=github-releases depName=fluxcd/flux2
tag: "v0.36.0"
@@ -22,7 +22,7 @@ metadata:
name: flux-installation
namespace: flux-system
spec:
interval: 10m
interval: 30m
path: ./manifests/install
prune: true
wait: true
@@ -39,4 +39,4 @@ spec:
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: not-used
name: all


@@ -0,0 +1,60 @@
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: GitRepository
metadata:
name: kube-prometheus-stack
namespace: flux-system
spec:
interval: 12h
url: https://github.com/prometheus-community/helm-charts.git
ref:
# renovate: registryUrl=https://prometheus-community.github.io/helm-charts chart=kube-prometheus-stack
tag: kube-prometheus-stack-41.6.1
ignore: |
# exclude all
/*
# include crd directory
!/charts/kube-prometheus-stack/crds
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
name: kube-prometheus-stack-crds
namespace: flux-system
spec:
interval: 30m
prune: false
wait: true
sourceRef:
kind: GitRepository
name: kube-prometheus-stack
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: GitRepository
metadata:
name: kyverno
namespace: flux-system
spec:
interval: 12h
url: https://github.com/kyverno/kyverno.git
ref:
# renovate: registryUrl=https://kyverno.github.io/kyverno chart=kyverno
tag: kyverno-chart-2.6.1
ignore: |
# exclude all
/*
# include crd directory
!/config/crds
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
name: kyverno-crds
namespace: flux-system
spec:
interval: 30m
prune: false
wait: true
sourceRef:
kind: GitRepository
name: kyverno