feat: overhaul

commit 0c9529c7a2 (parent b14022014b)
auricom, 2025-01-04 00:00:04 +01:00
408 changed files with 3187 additions and 2380 deletions

View File

@@ -0,0 +1,19 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: babybuddy
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: babybuddy-secret
template:
engineVersion: v2
data:
SECRET_KEY: "{{ .BABYBUDDY_SECRET_KEY }}"
dataFrom:
- extract:
key: babybuddy

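For context, external-secrets resolves the template above against the babybuddy item in 1Password and writes an ordinary Secret. Assuming that item exposes a BABYBUDDY_SECRET_KEY field, the rendered object looks roughly like this:

apiVersion: v1
kind: Secret
metadata:
  name: babybuddy-secret
type: Opaque
stringData:
  SECRET_KEY: <value of BABYBUDDY_SECRET_KEY from the 1Password item>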
View File

@@ -4,7 +4,6 @@ apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: &app babybuddy
- namespace: default
spec:
interval: 30m
chart:
@@ -32,43 +31,17 @@ spec:
babybuddy:
annotations:
reloader.stakater.com/auto: "true"
- initContainers:
- init-db:
- image:
- repository: ghcr.io/onedr0p/postgres-init
- tag: 16
- envFrom: &envFrom
- - secretRef:
- name: babybuddy-secret
- migrations:
- image:
- repository: ghcr.io/auricom/babybuddy
- tag: 2.7.0@sha256:39bc60fb6825d5bca296c078f599e00c6b9249d55992ddfe4200e6aa0841f86a
- pullPolicy: IfNotPresent
- envFrom: *envFrom
- command:
- - /bin/bash
- - -c
- - |
- #!/bin/bash
- set -o errexit
- set -o nounset
- cd www/public
- python3 ./manage.py migrate --noinput
- python3 ./manage.py createcachetable
containers:
app:
image:
- repository: ghcr.io/auricom/babybuddy
- tag: 2.7.0@sha256:e112563cbd34c4283e8cf5ee756dbed695799dcefe4f035f9495beacb6415d12
+ repository: lscr.io/linuxserver/babybuddy
+ tag: 2.7.0@sha256:579e8f62bed981ed94c021de60a302ba01c22c971ba2bacfcf821650fbc89e9d
env:
TZ: ${TIMEZONE}
- EMAIL_HOST: smtp-relay.default.svc.cluster.local.
- EMAIL_PORT: "2525"
- EMAIL_USE_TLS: "false"
- envFrom: *envFrom
+ CSRF_TRUSTED_ORIGINS: https://{{ .Release.Name }}.${SECRET_EXTERNAL_DOMAIN}
+ envFrom:
+ - secretRef:
+ name: babybuddy-secret
probes:
liveness: &probes
enabled: true

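Two substitution layers meet in this hunk: {{ .Release.Name }} is rendered by Helm when the chart is templated, while ${TIMEZONE} and ${SECRET_EXTERNAL_DOMAIN} are replaced earlier by Flux's post-build substitution. With hypothetical values for both variables, the container env would resolve to roughly:

env:
  TZ: Europe/Paris                                      # hypothetical ${TIMEZONE}
  CSRF_TRUSTED_ORIGINS: https://babybuddy.example.net   # hypothetical domain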
View File

@@ -1,5 +1,5 @@
---
- # yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
+ # yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
@@ -16,6 +16,7 @@ spec:
kind: GitRepository
name: home-ops-kubernetes
dependsOn:
+ - name: crunchy-postgres-operator-cluster
- name: external-secrets-stores
- name: volsync
wait: false
@@ -27,4 +28,4 @@ spec:
APP: *app
VOLSYNC_CAPACITY: 2Gi
VOLSYNC_UID: "65532"
- VOLSYNC_GID: "65532"
+ VOLSYNC_GID: "65532"

View File

@@ -4,8 +4,8 @@ kind: Cluster
metadata:
name: postgres16
spec:
- instances: 4 # set to the number of nodes in the cluster
- imageName: ghcr.io/cloudnative-pg/postgresql:16.2-10@sha256:82827bc9bc5ca7df1d7f7d4813444e0e7a8e32633ad72c5c66ad2be72c3b2095
+ instances: 1
+ imageName: ghcr.io/cloudnative-pg/postgresql:16.2
primaryUpdateStrategy: unsupervised
storage:
size: 50Gi
@@ -37,34 +37,28 @@ spec:
wal:
compression: bzip2
maxParallel: 8
- destinationPath: s3://postgresql/
- endpointURL: https://s3.${SECRET_INTERNAL_DOMAIN}
+ destinationPath: &dest s3://postgresql/
+ endpointURL: &url https://s3.${SECRET_INTERNAL_DOMAIN}
# Note: serverName version needs to be incremented
# when recovering from an existing cnpg cluster
- serverName: postgres16-v4
- s3Credentials:
+ serverName: postgres16-v5
+ s3Credentials: &credentials
accessKeyId:
name: cloudnative-pg-secret
key: aws-access-key-id
secretAccessKey:
name: cloudnative-pg-secret
key: aws-secret-access-key
- # # Note: previousCluster needs to be set to the name of the previous
- # # cluster when recovering from an existing cnpg cluster
- # bootstrap:
- # recovery:
- # source: postgres16-v3
- # externalClusters:
- # - name: postgres16-v3
- # barmanObjectStore:
- # destinationPath: s3://postgresql/
- # endpointURL: https://s3.${SECRET_INTERNAL_DOMAIN}
- # s3Credentials:
- # accessKeyId:
- # name: cloudnative-pg-secret
- # key: aws-access-key-id
- # secretAccessKey:
- # name: cloudnative-pg-secret
- # key: aws-secret-access-key
- # wal:
- # maxParallel: 8
+ # Note: previousCluster needs to be set to the name of the previous
+ # cluster when recovering from an existing cnpg cluster
+ bootstrap:
+ recovery:
+ source: &backup postgres16-v4
+ externalClusters:
+ - name: *backup
+ barmanObjectStore:
+ destinationPath: *dest
+ endpointURL: *url
+ s3Credentials: *credentials
+ wal:
+ maxParallel: 8

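The recovery pattern here is deliberately cyclic: each rebuilt cluster restores from the backups of the previous one (postgres16-v4) while writing its own WAL under a fresh serverName (postgres16-v5), so the two generations never collide in the bucket. On the next recovery only the two version fields move; a hypothetical next iteration:

backup:
  barmanObjectStore:
    serverName: postgres16-v6   # hypothetical next increment
bootstrap:
  recovery:
    source: postgres16-v5       # restore from what the current cluster wrote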
View File

@@ -3,19 +3,19 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
- name: cloudnative-pg-postgres16-pgdump
+ name: cloudnative-pg-postgres17-pgdump
namespace: default
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
- name: cloudnative-pg-postgres16-pgdump-secret
+ name: cloudnative-pg-postgres17-pgdump-secret
template:
engineVersion: v2
data:
# App
- POSTGRES_HOST: postgres16-rw.database.svc.cluster.local
+ POSTGRES_HOST: postgres17-rw.database.svc.cluster.local
POSTGRES_USER: "{{ .POSTGRES_SUPER_USER }}"
POSTGRES_PASSWORD: "{{ .POSTGRES_SUPER_PASS }}"
POSTGRES_PORT: "5432"

View File

@@ -3,7 +3,7 @@
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
- name: &app cloudnative-pg-postgres16-pgdump
+ name: &app cloudnative-pg-postgres17-pgdump
namespace: default
spec:
interval: 30m
@@ -29,7 +29,7 @@ spec:
keepHistory: false
values:
controllers:
- cloudnative-pg-postgres16-pgdump:
+ cloudnative-pg-postgres17-pgdump:
type: cronjob
cronjob:
concurrencyPolicy: Forbid
@@ -38,18 +38,18 @@ spec:
init-db:
image:
repository: ghcr.io/onedr0p/postgres-init
- tag: 16
+ tag: 17
env:
EXCLUDE_DBS: app home_assistant lidarr_log radarr_log sonarr_log prowlarr_log postgres template0 template1
envFrom: &envFrom
- secretRef:
- name: cloudnative-pg-postgres16-pgdump-secret
+ name: cloudnative-pg-postgres17-pgdump-secret
command: /scripts/list_dbs.sh
containers:
app:
image:
repository: prodrigestivill/postgres-backup-local
- tag: 16-alpine@sha256:d41309ea4abc06b1d369927cafa7abb8b9cccab21921dcb5d765379fcd9d60cb
+ tag: 17-alpine@sha256:d41309ea4abc06b1d369927cafa7abb8b9cccab21921dcb5d765379fcd9d60cb
command: [/backup.sh]
env:
POSTGRES_DB_FILE: /config/db_list
@@ -79,7 +79,7 @@ spec:
scripts:
enabled: true
type: configMap
- name: cloudnative-pg-postgres16-pgdump-scripts # overridden by kustomizeconfig
+ name: cloudnative-pg-postgres17-pgdump-scripts # overridden by kustomizeconfig
defaultMode: 0775
globalMounts:
- path: /scripts

View File

@@ -16,7 +16,7 @@ spec:
data:
# App
INVIDIOUS_CONFIG: |
- database_url: postgres://{{ .POSTGRES_USER }}:{{ .POSTGRES_PASS }}@postgres16-rw.database.svc.cluster.local.:5432/invidious
+ database_url: postgres://{{ .POSTGRES_USER }}:{{ .POSTGRES_PASS }}@postgres17-rw.database.svc.cluster.local.:5432/invidious
check_tables: true
port: 3000
domain: invidious.${SECRET_EXTERNAL_DOMAIN}
@@ -24,7 +24,7 @@ spec:
hmac_key: {{ .HMAC_KEY }}
# Postgres Init
INIT_POSTGRES_DBNAME: invidious
- INIT_POSTGRES_HOST: postgres16-rw.database.svc.cluster.local
+ INIT_POSTGRES_HOST: postgres17-rw.database.svc.cluster.local
INIT_POSTGRES_USER: "{{ .POSTGRES_USER }}"
INIT_POSTGRES_PASS: "{{ .POSTGRES_PASS }}"
INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}"

View File

@@ -15,7 +15,7 @@ spec:
engineVersion: v2
data:
# App
- KRESUS_DB_HOST: &dbHost postgres16-rw.database.svc.cluster.local
+ KRESUS_DB_HOST: &dbHost postgres17-rw.database.svc.cluster.local
KRESUS_DB_USERNAME: &dbUser "{{ .POSTGRES_USERNAME }}"
KRESUS_DB_PASSWORD: &dbPass "{{ .POSTGRES_PASSWORD }}"
KRESUS_DB_NAME: &dbName kresus

View File

@@ -58,7 +58,7 @@ spec:
LANG: C.UTF-8
KRESUS_DB_TYPE: postgres
KRESUS_DIR: /config
- KRESUS_EMAIL_HOST: mailrise.monitoring.svc.cluster.local
+ KRESUS_EMAIL_HOST: mailrise.observability.svc.cluster.local
KRESUS_EMAIL_PORT: 8025
KRESUS_EMAIL_TRANSPORT: smtp
KRESUS_EMAIL_FROM: kresus@mailrise.home.arpa

View File

@@ -16,7 +16,7 @@ spec:
data:
# App
SECRET_KEY_BASE: "{{ .MAYBE__SECRET_KEY_BASE }}"
- DB_HOST: &dbHost postgres16-rw.database.svc.cluster.local
+ DB_HOST: &dbHost postgres17-rw.database.svc.cluster.local
POSTGRES_DB: &dbName maybe
POSTGRES_USER: &dbUser "{{ .MAYBE__POSTGRES_USER }}"
POSTGRES_PASSWORD: &dbPass "{{ .MAYBE__POSTGRES_PASS }}"

View File

@@ -2,7 +2,6 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
- namespace: default
resources:
- ./externalsecret.yaml
- ./helmrelease.yaml

View File

@@ -20,7 +20,7 @@ spec:
MYSQL_ROOT_PASSWORD: &dbPass "{{ .PLANTIT__MARIADB_ROOT_PASS }}"
MYSQL_USERNAME: "{{ .PLANTIT__MARIADB_USER }}"
MYSQL_PSW: *dbPass
- FLORACODEX_KEY: "{{ .PLANTIT__FLORACODEX_KEY }}"
+ FLORACODEX_KEY: "{{ .PLANTIT__TREFLE_KEY }}"
JWT_SECRET: "{{ .PLANTIT__JWT_SECRET }}"
dataFrom:

View File

@@ -4,7 +4,6 @@ apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: thanos
- namespace: monitoring
spec:
interval: 30m
timeout: 15m
@@ -57,8 +56,8 @@ spec:
config:
insecure: true
additionalEndpoints:
- - dnssrv+_grpc._tcp.kube-prometheus-stack-thanos-discovery.monitoring.svc.cluster.local
- additionalReplicaLabels: ["__replica__"]
+ - dnssrv+_grpc._tcp.kube-prometheus-stack-thanos-discovery.observability.svc.cluster.local
+ additionalReplicaLabels: [__replica__]
serviceMonitor:
enabled: true
compact:
@@ -86,10 +85,10 @@ spec:
configMapKeyRef:
name: &configMap thanos-cache-configmap
key: cache.yaml
- extraArgs: ["--query-range.response-cache-config=$(THANOS_CACHE_CONFIG)"]
+ extraArgs: [--query-range.response-cache-config=$(THANOS_CACHE_CONFIG)]
ingress:
enabled: true
- ingressClassName: nginx
+ ingressClassName: internal
annotations:
gethomepage.dev/enabled: "true"
gethomepage.dev/name: Thanos
@@ -107,13 +106,13 @@ spec:
rule:
enabled: true
replicas: 3
- extraArgs: ["--web.prefix-header=X-Forwarded-Prefix"]
+ extraArgs: [--web.prefix-header=X-Forwarded-Prefix]
alertmanagersConfig:
value: |-
alertmanagers:
- api_version: v2
static_configs:
- - dnssrv+_http-web._tcp.alertmanager-operated.monitoring.svc.cluster.local
+ - dnssrv+_http-web._tcp.alertmanager-operated.observability.svc.cluster.local
rules:
value: |-
groups:

View File

@@ -2,7 +2,6 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
- namespace: monitoring
resources:
- ./objectbucketclaim.yaml
- ./helmrelease.yaml

View File

@@ -6,14 +6,15 @@ metadata:
name: &app thanos
namespace: flux-system
spec:
- targetNamespace: monitoring
+ targetNamespace: observability
commonMetadata:
labels:
app.kubernetes.io/name: *app
dependsOn:
- name: dragonfly-cluster
- name: external-secrets-stores
- name: rook-ceph-cluster
- path: ./kubernetes/apps/monitoring/thanos/app
+ path: ./kubernetes/apps/observability/thanos/app
prune: true
sourceRef:
kind: GitRepository

View File

@@ -0,0 +1,19 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: windmill
spec:
secretStoreRef:
kind: ClusterSecretStore
name: crunchy-pgo-secrets
target:
name: windmill-secret
template:
engineVersion: v2
data:
WINDMILL_POSTGRES_URL: 'postgres://{{ index . "user" }}:{{ index . "password" }}@{{ index . "host" }}/{{ index . "dbname" }}'
dataFrom:
- extract:
key: postgres-pguser-windmill

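Unlike the 1Password-backed secrets above, crunchy-pgo-secrets (defined later in this commit) is a Kubernetes-provider ClusterSecretStore that reads Secrets directly from the database namespace. Crunchy PGO publishes one Secret per entry in spec.users, so the template draws from an object shaped roughly like:

apiVersion: v1
kind: Secret
metadata:
  name: postgres-pguser-windmill
  namespace: database
stringData:
  user: windmill
  host: postgres-primary.database.svc   # derived from the PostgresCluster name
  dbname: windmill
  password: <generated by PGO>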
View File

@@ -29,37 +29,6 @@ spec:
app:
annotations:
reloader.stakater.com/auto: "true"
- initContainers:
- - name: init-db
- image: ghcr.io/onedr0p/postgres-init:16
- envFrom:
- - secretRef:
- name: &secret windmill-secret
- - name: init-grants
- image: ghcr.io/onedr0p/postgres-init:16
- command:
- - /bin/bash
- - -c
- - |
- #!/bin/bash
- set -o errexit
- set -o nounset
- /var/run/grants.sh
- envFrom:
- - secretRef:
- name: *secret
- volumeMounts:
- - name: grants
- readOnly: true
- subPath: grants.sh
- mountPath: /var/run/grants.sh
- volumes:
- - name: grants
- configMap:
- name: windmill-grants
- defaultMode: 509
baseDomain: &host "windmill.${SECRET_EXTERNAL_DOMAIN}"
baseProtocol: https
appReplicas: 1
@@ -105,6 +74,6 @@ spec:
enabled: false
valuesFrom:
- kind: Secret
- name: *secret
+ name: windmill-secret
valuesKey: WINDMILL_POSTGRES_URL
targetPath: windmill.databaseUrl

View File

@@ -0,0 +1,9 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- ./externalsecret.yaml
- ./helmrelease.yaml
- ../../../../templates/gatus/guarded

View File

@@ -11,6 +11,7 @@ spec:
labels:
app.kubernetes.io/name: *app
dependsOn:
+ - name: crunchy-postgres-operator-cluster
- name: external-secrets-stores
path: ./kubernetes/apps/default/windmill/app
prune: true

View File

@@ -0,0 +1,51 @@
---
# yaml-language-server: $schema=https://taskfile.dev/schema.json
version: '3'
vars:
BOOTSTRAP_RESOURCES_DIR: '{{.ROOT_DIR}}/.taskfiles/bootstrap/resources'
CLUSTER_DIR: '{{.ROOT_DIR}}/kubernetes'
tasks:
base:
desc: Bootstrap Base Apps
cmds:
- until kubectl wait nodes --for=condition=Ready=False --all --timeout=10m; do sleep 5; done
- helmfile --quiet --file {{.CLUSTER_DIR}}/bootstrap/apps/helmfile.yaml apply --skip-diff-on-install --suppress-diff
- until kubectl wait nodes --for=condition=Ready --all --timeout=10m; do sleep 5; done
preconditions:
- talosctl config info
# - test -f {{.CLUSTER_DIR}}/talos/cluster-0/talosconfig
- test -f {{.CLUSTER_DIR}}/bootstrap/apps/helmfile.yaml
- which helmfile kubectl
# NOTE: Nodes must all be part of the Ceph cluster and Ceph disks must share the same disk model
rook:
desc: Bootstrap Rook-Ceph
cmds:
- minijinja-cli {{.BOOTSTRAP_RESOURCES_DIR}}/wipe-rook.yaml.j2 | kubectl apply --server-side --filename -
- until kubectl --namespace default get job/wipe-rook &>/dev/null; do sleep 5; done
- kubectl --namespace default wait job/wipe-rook --for=condition=complete --timeout=5m
- stern --namespace default job/wipe-rook --no-follow
- kubectl --namespace default delete job wipe-rook
env:
NODE_COUNT:
sh: talosctl config info --output json | jq --raw-output '.nodes | length'
preconditions:
- test -f {{.BOOTSTRAP_RESOURCES_DIR}}/wipe-rook.yaml.j2
- which jq kubectl minijinja-cli stern talosctl
flux:
desc: Bootstrap Flux
cmds:
- kubectl create namespace flux-system --dry-run=client -o yaml | kubectl apply --filename -
- cat {{.SOPS_AGE_KEY}} | kubectl --namespace flux-system create secret generic sops-age --from-file=age.agekey=/dev/stdin
- kubectl apply --server-side --kustomize {{.CLUSTER_DIR}}/bootstrap/flux
- SOPS_AGE_KEY_FILE={{.SOPS_AGE_KEY}} sops exec-file {{.CLUSTER_DIR}}/bootstrap/flux/github-deploy-key.sops.yaml "kubectl apply --server-side --filename {}"
- SOPS_AGE_KEY_FILE={{.SOPS_AGE_KEY}} sops exec-file {{.CLUSTER_DIR}}/flux/vars/cluster-secrets.sops.yaml "kubectl apply --server-side --filename {}"
- kubectl apply --server-side --filename ./flux/vars/cluster-settings.yaml
apps:
desc: Bootstrap Apps
cmds:
- kubectl apply --server-side --kustomize {{.CLUSTER_DIR}}/flux/config

View File

@@ -0,0 +1,49 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: &app wipe-rook
namespace: default
labels:
app.kubernetes.io/name: *app
spec:
parallelism: 4
template:
metadata:
labels:
app.kubernetes.io/name: *app
spec:
restartPolicy: Never
containers:
- name: disk-wipe
image: rook/ceph:master
securityContext:
privileged: true
command:
[
"/bin/sh",
"-c",
"echo 'Starting disk operations on /dev/sdb'; \
echo 'Running sgdisk --zap-all /dev/sdb'; time sgdisk --zap-all /dev/sdb; \
echo 'Running dd if=/dev/zero bs=1M count=10000 oflag=direct of=/dev/sdb'; time dd if=/dev/zero bs=1M count=10000 oflag=direct of=/dev/sdb; \
echo 'Running blkdiscard /dev/sdb'; time blkdiscard /dev/sdb; \
echo 'Running partprobe /dev/sdb'; time partprobe /dev/sdb; \
echo 'Disk operations completed on /dev/sdb'"
]
volumeMounts:
- mountPath: /dev
name: dev
securityContext:
runAsUser: 0
runAsGroup: 0
volumes:
- name: dev
hostPath:
path: /dev
topologySpreadConstraints:
- maxSkew: 1
labelSelector:
matchLabels:
app.kubernetes.io/name: *app
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule

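The pairing of parallelism with a maxSkew: 1 hostname spread constraint is what makes this wipe run once per node: an extra pod on an already-used host would violate the constraint and stay Pending. The manifest shown here pins parallelism: 4, but since the bootstrap task renders it with minijinja and a NODE_COUNT variable, the template presumably derives the pod count from the cluster size; a hypothetical sketch of that part of wipe-rook.yaml.j2:

spec:
  parallelism: {{ NODE_COUNT }}   # hypothetical; NODE_COUNT comes from talosctl config info
  completions: {{ NODE_COUNT }}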
View File

@@ -19,8 +19,8 @@ tasks:
"containers": [
{
"name": "debug",
"image": "ghcr.io/onedr0p/alpine:rolling",
"command": ["/bin/bash"],
"image": "cgr.dev/chainguard/wolfi-base",
"command": ["sleep","9999999"],
"stdin": true,
"stdinOnce": true,
"tty": true,
@@ -44,8 +44,8 @@ tasks:
}
}'
requires:
- vars: ["claim"]
+ vars: [claim]
vars:
ns: '{{.ns | default "default"}}'
preconditions:
- - { msg: "PVC not found", sh: "kubectl -n {{.ns}} get persistentvolumeclaim {{.claim}}" }
+ - { msg: PVC not found, sh: "kubectl -n {{.ns}} get persistentvolumeclaim {{.claim}}" }

View File

@@ -19,8 +19,8 @@ x-env: &env
ts: '{{.ts}}'
vars:
- scriptsDir: '{{.ROOT_DIR}}/.taskfiles/VolSync/scripts'
- templatesDir: '{{.ROOT_DIR}}/.taskfiles/VolSync/templates'
+ scriptsDir: '{{.ROOT_DIR}}/.taskfiles/volsync/scripts'
+ templatesDir: '{{.ROOT_DIR}}/.taskfiles/volsync/templates'
ts: '{{now | date "150405"}}'
tasks:

View File

@@ -13,7 +13,7 @@
<div align="center">
[![Discord](https://img.shields.io/discord/673534664354430999?style=for-the-badge&label&logo=discord&logoColor=white&color=blue)](https://discord.gg/k8s-at-home)
- [![Kubernetes](https://img.shields.io/badge/v1.31-blue?style=for-the-badge&logo=kubernetes&logoColor=white)](https://talos.dev/)
+ [![Kubernetes](https://img.shields.io/badge/v1.32-blue?style=for-the-badge&logo=kubernetes&logoColor=white)](https://talos.dev/)
[![Renovate](https://img.shields.io/github/actions/workflow/status/auricom/home-ops/renovate.yaml?branch=main&label=&logo=renovatebot&style=for-the-badge&color=blue)](https://github.com/auricom/home-ops/actions/workflows/renovate.yaml)
</div>

View File

@@ -14,12 +14,13 @@ env:
SOPS_AGE_KEY_FILE: "~/.config/sops/age/keys.txt"
includes:
- ansible: .taskfiles/Ansible/Taskfile.yaml
- external-secrets: .taskfiles/ExternalSecrets/Taskfile.yaml
- flux: .taskfiles/Flux/Taskfile.yaml
- kubernetes: .taskfiles/Kubernetes/Taskfile.yaml
- sops: .taskfiles/Sops/Taskfile.yaml
- volsync: .taskfiles/VolSync/Taskfile.yaml
+ ansible: .taskfiles/ansible/Taskfile.yaml
+ bootstrap: .taskfiles/bootstrap/Taskfile.yaml
+ external-secrets: .taskfiles/externalsecrets/Taskfile.yaml
+ flux: .taskfiles/flux/Taskfile.yaml
+ kubernetes: .taskfiles/kubernetes/Taskfile.yaml
+ sops: .taskfiles/sops/Taskfile.yaml
+ volsync: .taskfiles/volsync/Taskfile.yaml
tasks:

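After this rename, each include is addressed by its lower-case key, for example:

task bootstrap:base
task bootstrap:apps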
View File

@@ -1,5 +1,5 @@
---
- # yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
+ # yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
@@ -25,7 +25,7 @@ spec:
substitute:
APP: *app
---
- # yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
+ # yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:

View File

@@ -15,7 +15,7 @@ metadata:
namespace: actions-runner-system
spec:
type: alertmanager
- address: http://kube-prometheus-stack-alertmanager.monitoring:9093/api/v2/alerts/
+ address: http://kube-prometheus-stack-alertmanager.observability:9093/api/v2/alerts/
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json
apiVersion: notification.toolkit.fluxcd.io/v1beta3

View File

@@ -38,4 +38,4 @@ spec:
enabled: true
servicemonitor:
enabled: true
- prometheusInstance: monitoring
+ prometheusInstance: observability

View File

@@ -3,15 +3,18 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
- name: cert-manager-webhook-ovh
+ name: cloudflare
namespace: cert-manager
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
- name: cert-manager-webhook-ovh-secret
+ name: cloudflare-secret
+ template:
+ engineVersion: v2
+ data:
+ CLOUDFLARE_TOKEN: "{{ .CLOUDFLARE_TOKEN }}"
dataFrom:
- extract:
- # applicationKey, applicationSecret, consumerKey
- key: cert-manager-webhook-ovh
+ key: cloudflare

View File

@@ -0,0 +1,42 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/cert-manager.io/clusterissuer_v1.json
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-production
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: "${SECRET_CLUSTER_DOMAIN_EMAIL}"
privateKeySecretRef:
name: letsencrypt-production
solvers:
- dns01:
cloudflare:
email: "${SECRET_CLUSTER_DOMAIN_EMAIL}"
apiTokenSecretRef:
name: cloudflare-secret
key: CLOUDFLARE_TOKEN
selector:
dnsZones: ["${SECRET_EXTERNAL_DOMAIN}"]
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/cert-manager.io/clusterissuer_v1.json
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
server: https://acme-staging-v02.api.letsencrypt.org/directory
email: "${SECRET_CLUSTER_DOMAIN_EMAIL}"
privateKeySecretRef:
name: letsencrypt-staging
solvers:
- dns01:
cloudflare:
email: "${SECRET_CLUSTER_DOMAIN_EMAIL}"
apiTokenSecretRef:
name: cloudflare-secret
key: CLOUDFLARE_TOKEN
selector:
dnsZones: ["${SECRET_EXTERNAL_DOMAIN}"]

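These issuers are consumed by Certificate resources (or by cert-manager's ingress-shim through ingress annotations). A minimal sketch with a hypothetical host:

apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: example-tls        # hypothetical
  namespace: default
spec:
  secretName: example-tls
  issuerRef:
    kind: ClusterIssuer
    name: letsencrypt-production
  dnsNames:
    - example.${SECRET_EXTERNAL_DOMAIN}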
View File

@@ -1,5 +1,5 @@
---
- # yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
+ # yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
@@ -23,21 +23,21 @@ spec:
substitute:
APP: *app
---
- # yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
+ # yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
- name: cert-manager-webhook-ovh
+ name: &app cert-manager-issuers
namespace: flux-system
spec:
targetNamespace: cert-manager
commonMetadata:
labels:
- app.kubernetes.io/name: &app cert-manager
+ app.kubernetes.io/name: *app
dependsOn:
- name: cert-manager
- name: external-secrets-stores
- path: ./kubernetes/apps/cert-manager/cert-manager/webhook-ovh
+ path: ./kubernetes/apps/cert-manager/cert-manager/issuers
prune: true
sourceRef:
kind: GitRepository

View File

@@ -1,69 +0,0 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: cert-manager-webhook-ovh
namespace: cert-manager
spec:
interval: 30m
chart:
spec:
chart: cert-manager-webhook-ovh
version: 0.7.3
sourceRef:
kind: HelmRepository
name: cert-manager-webhook-ovh
namespace: flux-system
maxHistory: 2
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
configVersion: 0.0.1
podAnnotations:
reloader.stakater.com/auto: "true"
groupName: "${SECRET_DOMAIN}"
certManager:
namespace: cert-manager
serviceAccountName: cert-manager
issuers:
- name: letsencrypt-staging
create: true
kind: ClusterIssuer
acmeServerUrl: https://acme-staging-v02.api.letsencrypt.org/directory
email: "${SECRET_EXTERNAL_DOMAIN_EMAIL}"
ovhEndpointName: ovh-eu
ovhAuthenticationRef:
applicationKeyRef:
name: cert-manager-webhook-ovh-secret
key: applicationKey
applicationSecretRef:
name: cert-manager-webhook-ovh-secret
key: applicationSecret
consumerKeyRef:
name: cert-manager-webhook-ovh-secret
key: consumerKey
- name: letsencrypt-production
create: true
kind: ClusterIssuer
acmeServerUrl: https://acme-v02.api.letsencrypt.org/directory
email: "${SECRET_EXTERNAL_DOMAIN_EMAIL}"
ovhEndpointName: ovh-eu
ovhAuthenticationRef:
applicationKeyRef:
name: cert-manager-webhook-ovh-secret
key: applicationKey
applicationSecretRef:
name: cert-manager-webhook-ovh-secret
key: applicationSecret
consumerKeyRef:
name: cert-manager-webhook-ovh-secret
key: consumerKey

View File

@@ -14,7 +14,7 @@ metadata:
namespace: cert-manager
spec:
type: alertmanager
- address: http://kube-prometheus-stack-alertmanager.monitoring:9093/api/v2/alerts/
+ address: http://kube-prometheus-stack-alertmanager.observability:9093/api/v2/alerts/
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json
apiVersion: notification.toolkit.fluxcd.io/v1beta3

View File

@@ -0,0 +1,203 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/postgres-operator.crunchydata.com/postgrescluster_v1beta1.json
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
name: &name postgres
spec:
postgresVersion: 17
metadata:
labels:
crunchy-userinit.ramblurr.github.com/enabled: "true"
crunchy-userinit.ramblurr.github.com/superuser: postgres
patroni: # turn on sync writes to at least 1 other replica
dynamicConfiguration:
synchronous_mode: true
postgresql:
max_wal_size: 5GB
synchronous_commit: "on"
pg_hba:
- hostnossl authelia all 192.168.8.0/22 md5 # Needed because authelia does not support SSL yet
- hostssl all all all md5
parameters:
max_connections: 500
instances:
- name: postgres
metadata:
labels:
app.kubernetes.io/name: crunchy-postgres
replicas: &replica 2
dataVolumeClaimSpec:
storageClassName: openebs-hostpath
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 80Gi
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
postgres-operator.crunchydata.com/cluster: *name
postgres-operator.crunchydata.com/data: postgres
users:
# Superuser
- name: postgres
databases:
- postgres
options: SUPERUSER
password: &password
type: AlphaNumeric
# Applications
- name: authelia
databases:
- authelia
password: *password
- name: bazarr
databases:
- bazarr_main
- bazarr_log
password: *password
- name: ghostfolio
databases:
- ghostfolio
password: *password
- name: home-assistant
databases:
- home-assistant
password: *password
- name: joplin
databases:
- joplin
password: *password
- name: lldap
databases:
- lldap
password: *password
- name: lidarr
databases:
- lidarr_main
- lidarr_log
password: *password
- name: lychee
databases:
- lychee
password: *password
- name: outline
databases:
- outline
password: *password
- name: paperless
databases:
- paperless
password: *password
- name: prowlarr
databases:
- prowlarr_main
- prowlarr_logs
password: *password
- name: pushover-notifier
databases:
- pushover-notifier
password: *password
- name: radarr
databases:
- radarr_main
- radarr_log
password: *password
- name: sonarr
databases:
- sonarr_main
- sonarr_log
password: *password
- name: tandoor
databases:
- tandoor
password: *password
- name: vikunja
databases:
- vikunja
password: *password
backups:
pgbackrest:
configuration: &backupConfig
- secret:
name: crunchy-postgres-secret
global: &backupFlag
compress-type: bz2
compress-level: "9"
# Minio
repo1-block: y
repo1-bundle: y
repo1-path: /crunchy-pgo
repo1-retention-full: "30" # days
repo1-retention-full-type: time
repo1-s3-uri-style: path
manual:
repoName: repo1
options:
- --type=full
metadata:
labels:
app.kubernetes.io/name: crunchy-postgres-backup
repos:
- name: repo1 # Minio
s3: &minio
bucket: crunchy-postgres-operator
endpoint: "s3.${SECRET_INTERNAL_DOMAIN}"
region: us-east-1
schedules:
full: 0 1 * * 0 # Sunday at 01:00
differential: 0 1 * * 1-6 # Mon-Sat at 01:00
incremental: 0 2-23 * * * # Every hour except 01:00
# dataSource:
# pgbackrest:
# stanza: "db"
# configuration: *backupConfig
# global: *backupFlag
# repo:
# name: "repo1"
# s3: *minio
monitoring:
pgmonitor:
exporter:
resources:
requests:
cpu: 10m
memory: 64M
limits:
memory: 512M
proxy:
pgBouncer:
port: 5432
service:
metadata:
annotations:
lbipam.cilium.io/ips: ${CLUSTER_LB_POSTGRES}
type: LoadBalancer
replicas: *replica
metadata:
labels:
app.kubernetes.io/name: crunchy-postgres-pgbouncer
config:
global:
pool_mode: session # Grafana requires session https://github.com/grafana/grafana/issues/74260#issuecomment-1702795311. Everything else is happy with transaction
client_tls_sslmode: prefer
default_pool_size: "100"
max_client_conn: "500"
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
postgres-operator.crunchydata.com/cluster: *name
postgres-operator.crunchydata.com/role: pgbouncer

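The manual: stanza only declares what a manual backup does; it is actually fired by annotating the cluster with PGO's pgbackrest-backup trigger annotation. A sketch of a task wrapper (the task itself is hypothetical; the annotation key is PGO's documented trigger):

crunchy-backup:   # hypothetical task name
  desc: Trigger a manual full pgBackRest backup on repo1
  cmds:
    - kubectl --namespace database annotate postgrescluster postgres
      postgres-operator.crunchydata.com/pgbackrest-backup="$(date +%s)" --overwrite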
View File

@@ -0,0 +1,26 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: crunchy-postgres
spec:
refreshInterval: 5m
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: crunchy-postgres-secret
template:
engineVersion: v2
data:
s3.conf: |
[global]
repo1-s3-key={{ .CRUNCHY_POSTGRES_S3_ACCESS_KEY }}
repo1-s3-key-secret={{ .CRUNCHY_POSTGRES_S3_SECRET_KEY }}
encryption.conf: |
[global]
repo1-cipher-pass={{ .CRUNCHY_POSTGRES_BACKUP_ENCRYPTION_CIPHER }}
dataFrom:
- extract:
key: crunchy-postgres

View File

@@ -0,0 +1,8 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./externalsecret.yaml
- ./cluster.yaml
- ./podmonitor.yaml

View File

@@ -0,0 +1,37 @@
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: cpgo-postgres
spec:
jobLabel: cpgo-postgres
namespaceSelector:
matchNames:
- database
podMetricsEndpoints:
- honorLabels: true
path: /metrics
port: exporter
relabelings:
- sourceLabels:
[
"__meta_kubernetes_namespace",
"__meta_kubernetes_pod_label_postgres_operator_crunchydata_com_cluster",
]
targetLabel: pg_cluster
separator: "/"
replacement: "$1$2"
- sourceLabels:
[
__meta_kubernetes_pod_label_postgres_operator_crunchydata_com_instance,
]
targetLabel: deployment
- sourceLabels:
[__meta_kubernetes_pod_label_postgres_operator_crunchydata_com_role]
targetLabel: role
- sourceLabels: [__meta_kubernetes_pod_name]
targetLabel: instance
selector:
matchLabels:
postgres-operator.crunchydata.com/cluster: postgres
postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true"

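Net effect of the relabelings: series scraped from the exporter sidecar carry cluster-scoped identity labels rather than raw pod metadata. For the primary pod the target labels come out roughly as (illustrative values):

# pg_cluster: database/postgres
# deployment: <instance set name>
# role: master
# instance: <pod name>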
View File

@@ -0,0 +1,19 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
name: crunchy-pgo-secrets
spec:
provider:
kubernetes:
remoteNamespace: database
server:
caProvider:
type: ConfigMap
name: kube-root-ca.crt
namespace: database
key: ca.crt
auth:
serviceAccount:
name: external-secrets-pg
namespace: database

View File

@@ -0,0 +1,7 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./rbac.yaml
- ./clustersecretstore.yaml

View File

@@ -0,0 +1,31 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: external-secrets-pg
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["authorization.k8s.io"]
resources: ["selfsubjectrulesreviews"]
verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: &name external-secrets-pg
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: *name
subjects:
- kind: ServiceAccount
name: *name
namespace: database
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-secrets-pg
namespace: database

View File

@@ -0,0 +1,110 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: &app crunchy-postgres-operator
namespace: flux-system
spec:
targetNamespace: database
commonMetadata:
labels:
app.kubernetes.io/name: *app
interval: 30m
timeout: 5m
path: ./kubernetes/apps/database/crunchy-postgres-operator/operator
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
wait: true
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: &app crunchy-postgres-operator-cluster
namespace: flux-system
spec:
targetNamespace: database
commonMetadata:
labels:
app.kubernetes.io/name: *app
interval: 30m
timeout: 5m
path: ./kubernetes/apps/database/crunchy-postgres-operator/cluster
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
wait: true
dependsOn:
- name: crunchy-postgres-operator
- name: external-secrets-stores
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: &app crunchy-postgres-operator-secretstore
namespace: flux-system
spec:
targetNamespace: database
commonMetadata:
labels:
app.kubernetes.io/name: *app
interval: 30m
timeout: 5m
path: ./kubernetes/apps/database/crunchy-postgres-operator/clustersecretstore
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
wait: true
dependsOn:
- name: crunchy-postgres-operator-cluster
- name: external-secrets
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: &app crunchy-postgres-userinit-controller
namespace: flux-system
spec:
targetNamespace: database
commonMetadata:
labels:
app.kubernetes.io/name: *app
interval: 30m
timeout: 5m
path: ./kubernetes/apps/database/crunchy-postgres-operator/userinit-controller
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
wait: true
dependsOn:
- name: crunchy-postgres-operator-cluster
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: &app crunchy-postgres-pgadmin
namespace: flux-system
spec:
targetNamespace: database
commonMetadata:
labels:
app.kubernetes.io/name: *app
interval: 30m
timeout: 5m
path: ./kubernetes/apps/database/crunchy-postgres-operator/pgadmin
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
wait: true
dependsOn:
- name: crunchy-postgres-operator-cluster

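Read together, the dependsOn fields above form a small dependency tree that Flux reconciles in order:

# crunchy-postgres-operator
# └── crunchy-postgres-operator-cluster      (also waits on external-secrets-stores)
#     ├── crunchy-postgres-operator-secretstore   (also waits on external-secrets)
#     ├── crunchy-postgres-userinit-controller
#     └── crunchy-postgres-pgadmin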
View File

@@ -0,0 +1,28 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: crunchy-postgres-operator
spec:
interval: 30m
chart:
spec:
chart: pgo
version: 5.7.2
sourceRef:
kind: HelmRepository
name: crunchydata
namespace: flux-system
interval: 5m
install:
crds: CreateReplace
upgrade:
crds: CreateReplace
dependsOn:
- name: openebs
namespace: openebs-system
values:
install:
clusterLabels:
app.kubernetes.io/name: pgo

View File

@@ -0,0 +1,6 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./helmrelease.yaml

View File

@@ -0,0 +1,20 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: pgadmin
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: pgadmin-secret
template:
engineVersion: v2
data:
# App
PGADMIN_PASSWORD: "{{ .password }}"
dataFrom:
- extract:
key: pgadmin

View File

@@ -0,0 +1,33 @@
---
# trunk-ignore(checkov/CKV_K8S_21)
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: pgadmin
annotations:
hajimari.io/icon: mdi:database
gethomepage.dev/enabled: "true"
gethomepage.dev/name: pgAdmin
gethomepage.dev/description: PostgreSQL management tool.
gethomepage.dev/group: Infrastructure
gethomepage.dev/icon: pgadmin.png
gethomepage.dev/pod-selector: >-
app in (
pgadmin
)
spec:
ingressClassName: internal
tls:
- hosts:
- &host pgadmin.${SECRET_EXTERNAL_DOMAIN}
rules:
- host: *host
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: pgadmin
port:
number: 5050

View File

@@ -0,0 +1,9 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./externalsecret.yaml
- ./ingress.yaml
- ./pgadmin.yaml
- ./service.yaml

View File

@@ -0,0 +1,22 @@
---
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PGAdmin
metadata:
name: pgadmin
spec:
users:
- username: admin@homelab.io
role: Administrator
passwordRef:
name: pgadmin-secret
key: PGADMIN_PASSWORD
dataVolumeClaimSpec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
serverGroups:
- name: supply
postgresClusterSelector: {}
serviceName: pgadmin

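An empty postgresClusterSelector registers every PostgresCluster in the namespace under the supply server group. Should the group ever need to be scoped to a single cluster, a label selector would do it; a hypothetical narrowing:

serverGroups:
  - name: supply
    postgresClusterSelector:
      matchLabels:
        postgres-operator.crunchydata.com/cluster: postgres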
View File

@@ -0,0 +1,14 @@
---
# trunk-ignore(checkov/CKV_K8S_21)
apiVersion: v1
kind: Service
metadata:
name: pgadmin
spec:
type: ClusterIP
ports:
- name: pgadmin-port
port: 5050
protocol: TCP
selector:
postgres-operator.crunchydata.com/pgadmin: pgadmin

View File

@@ -0,0 +1,17 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: userinit-controller
spec:
interval: 30m
chart:
spec:
chart: crunchy-userinit-controller
version: 0.0.4
sourceRef:
kind: HelmRepository
name: crunchy-userinit
values:
fullnameOverride: crunchy-userinit-controller

View File

@@ -0,0 +1,10 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: crunchy-userinit
spec:
interval: 30m
url: https://ramblurr.github.io/crunchy-userinit-controller
timeout: 3m

View File

@@ -0,0 +1,7 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./helmrepository.yaml
- ./helmrelease.yaml

View File

@@ -4,7 +4,6 @@ apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: emqx
- namespace: default
spec:
secretStoreRef:
kind: ClusterSecretStore

View File

@@ -4,7 +4,6 @@ apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: emqx
- namespace: default
spec:
interval: 30m
chart:
@@ -37,12 +36,12 @@ spec:
EMQX_DASHBOARD__DEFAULT_USERNAME: admin
service:
type: LoadBalancer
- loadBalancerIP: 192.168.169.109
+ loadBalancerIP: ${CLUSTER_LB_EMQX}
externalTrafficPolicy: Local
ingress:
dashboard:
enabled: true
- ingressClassName: nginx
+ ingressClassName: internal
annotations:
hajimari.io/appName: "EMQX"
hajimari.io/icon: simple-icons:eclipsemosquitto
@@ -63,17 +62,6 @@ spec:
enabled: true
storageClass: rook-ceph-block
size: 400Mi
- affinity:
- podAntiAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
- podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: app.kubernetes.io/name
- operator: In
- values: ["emqx"]
- topologyKey: kubernetes.io/hostname
resources:
requests:
cpu: 100m

View File

@@ -2,7 +2,6 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
- namespace: default
resources:
- ./externalsecret.yaml
- ./helmrelease.yaml

View File

@@ -1,19 +1,19 @@
---
- # yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
+ # yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: &app emqx
namespace: flux-system
spec:
- targetNamespace: default
+ targetNamespace: database
commonMetadata:
labels:
app.kubernetes.io/name: *app
dependsOn:
- name: rook-ceph-cluster
- name: external-secrets-stores
- path: ./kubernetes/apps/default/emqx/app
+ path: ./kubernetes/apps/database/emqx/app
prune: true
sourceRef:
kind: GitRepository

View File

@@ -6,7 +6,7 @@ resources:
# Pre Flux-Kustomizations
- ./namespace.yaml
# Flux-Kustomizations
- - ./cloudnative-pg/ks.yaml
+ - ./crunchy-postgres-operator/ks.yaml
- ./dragonfly/ks.yaml
- ./emqx/ks.yaml
- ./influx/ks.yaml
- ./pgadmin/ks.yaml

View File

@@ -14,7 +14,7 @@ metadata:
namespace: database
spec:
type: alertmanager
- address: http://kube-prometheus-stack-alertmanager.monitoring:9093/api/v2/alerts/
+ address: http://kube-prometheus-stack-alertmanager.observability:9093/api/v2/alerts/
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json
apiVersion: notification.toolkit.fluxcd.io/v1beta3

View File

@@ -1,29 +0,0 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: atuin
namespace: default
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: atuin-secret
template:
engineVersion: v2
data:
# App
ATUIN_DB_URI: "postgres://{{ .POSTGRES_USER }}:{{ .POSTGRES_PASS }}@postgres16-rw.database.svc.cluster.local/atuin"
# Postgres Init
INIT_POSTGRES_DBNAME: atuin
INIT_POSTGRES_HOST: postgres16-rw.database.svc.cluster.local
INIT_POSTGRES_USER: "{{ .POSTGRES_USER }}"
INIT_POSTGRES_PASS: "{{ .POSTGRES_PASS }}"
INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}"
dataFrom:
- extract:
key: atuin
- extract:
key: cloudnative-pg

Some files were not shown because too many files have changed in this diff.