new gitops template

This commit is contained in:
auricom
2021-04-13 10:34:08 +02:00
parent 67c4d6a855
commit a95f32b44d
335 changed files with 3131 additions and 3650 deletions

111
.github/renovate.json5 vendored
View File

@@ -1,65 +1,109 @@
{
"enabled": true,
"timezone": "Europe/Paris",
"semanticCommits": "enabled",
"dependencyDashboard": true,
"dependencyDashboardTitle": "Renovate Dashboard",
"commitMessageSuffix": "[ci-skip]",
"commitBody": "Signed-off-by: Auricom <auricom@users.noreply.github.com>",
// Do not notify on closed unmerged PRs
"suppressNotifications": ["prIgnoreNotification"],
// Do not rebase PRs
"rebaseWhen": "conflicted",
"assignees": ["@auricom"],
"kubernetes": {
"fileMatch": ["cluster/.+\\.yaml$"],
"ignorePaths": [
"cluster/base/"
]
},
"helm-values": {
"fileMatch": ["cluster/.+\\.yaml$"]
"fileMatch": ["cluster/.+helm-release\\.yaml$"]
},
"regexManagers": [
// regexManager to read and process HelmReleases and CRDs
{
"fileMatch": ["cluster/.+\\.yaml$"],
"fileMatch": [
"cluster/.+helm-release\\.yaml$",
"cluster/core/crds/.+\\.yaml$"
],
"matchStrings": [
"registryUrl=(?<registryUrl>.*?)\n *chart: (?<depName>.*?)\n *version: (?<currentValue>.*)\n"
// helm releases
"registryUrl=(?<registryUrl>.*?)\n *chart: (?<depName>.*?)\n *version: (?<currentValue>.*)\n",
// kube-prometheus-stack crd
"registryUrl=(?<registryUrl>.*?)\n *tag: (?<depName>[a-zA-Z-]+)-(?<currentValue>.*)\n",
// rook-ceph crd
"registryUrl=(?<registryUrl>.*?) chart=(?<depName>.*?)\n *tag: (?<currentValue>.*)\n",
// cert-manager crd
"registryUrl=(?<registryUrl>.*?) chart=(?<depName>.*?)\n.*\\/(?<currentValue>.*?)\\/"
],
"datasourceTemplate": "helm"
}
},
],
"packageRules": [
// Setup datasources
{
"datasources": ["helm"],
"commitMessageTopic": "Helm chart {{depName}}",
"matchDatasources": ["helm"],
"separateMinorPatch": true
},
// Add labels according to package and update types
// global docker datasource settings
{
"datasources": ["docker"],
"updateTypes": ["major"],
"labels": ["renovate/image-release", "dependency/major"],
"enabled": true
"matchDatasources": ["docker"],
"enabled": true,
"commitMessageTopic": "container image {{depName}}",
"commitMessageExtra": "to {{#if isSingleVersion}}v{{{newVersion}}}{{else}}{{{newValue}}}{{/if}}",
"matchUpdateTypes": ["major", "minor", "patch"],
"separateMinorPatch": true
},
// add labels according to package and update types
{
"matchDatasources": ["docker"],
"matchUpdateTypes": ["major"],
"labels": ["renovate/image", "dep/major"]
},
{
"datasources": ["docker"],
"updateTypes": ["minor"],
"labels": ["renovate/image-release", "dependency/minor"]
"matchDatasources": ["docker"],
"matchUpdateTypes": ["minor"],
"labels": ["renovate/image", "dep/minor"]
},
{
"datasources": ["docker"],
"updateTypes": ["patch"],
"labels": ["renovate/image-release", "dependency/patch"]
"matchDatasources": ["docker"],
"matchUpdateTypes": ["patch"],
"labels": ["renovate/image", "dep/patch"]
},
{
"datasources": ["helm"],
"updateTypes": ["major"],
"labels": ["renovate/helm-release", "dependency/major"]
"matchDatasources": ["helm"],
"matchUpdateTypes": ["major"],
"labels": ["renovate/helm", "dep/major"]
},
{
"datasources": ["helm"],
"updateTypes": ["minor"],
"labels": ["renovate/helm-release", "dependency/minor"]
"matchDatasources": ["helm"],
"matchUpdateTypes": ["minor"],
"labels": ["renovate/helm", "dep/minor"]
},
{
"datasources": ["helm"],
"updateTypes": ["patch"],
"labels": ["renovate/helm-release", "dependency/patch"]
"matchDatasources": ["helm"],
"matchUpdateTypes": ["patch"],
"labels": ["renovate/helm", "dep/patch"]
},
// custom version schemes
{
"matchDatasources": ["docker"],
"versioning": "regex:^(?<major>\\d+)\\.(?<minor>\\d+)\\.(?<patch>\\d+)-(?<compatibility>.*)$",
"matchPackageNames": ["blakeblackshear/frigate"]
},
{
"matchDatasources": ["docker"],
"versioning": "regex:^RELEASE\\.(?<major>\\d+)-(?<minor>\\d+)-(?<patch>\\d+)T.*Z(-(?<compatibility>.*))?$",
"matchPackageNames": ["minio/minio"]
},
// pin package versions
{
"matchDatasources": ["docker"],
"allowedVersions": "<13",
"matchPackageNames": [
"prodrigestivill/postgres-backup-local"
]
},
{
"matchDatasources": ["docker"],
@@ -68,13 +112,18 @@
"influxdb",
]
},
// Enable auto-merge
// enable auto-merge
{
"datasources": ["docker"],
"matchDatasources": ["docker"],
"automerge": true,
"automergeType": "branch",
"requiredStatusChecks": null,
"updateTypes": ["minor", "patch"],
"packageNames": ["ghcr.io/k8s-at-home/jackett"]
"matchUpdateTypes": ["minor", "patch"],
"matchPackageNames": [
"flaresolverr/flaresolverr",
"ghcr.io/k8s-at-home/jackett",
"ghcr.io/k8s-at-home/prowlarr"
]
}
]
}

View File

@@ -3,11 +3,11 @@ name: update-flux
on:
workflow_dispatch:
schedule:
- cron: "0 0 * * *"
- cron: "0 12 * * *"
jobs:
flux-update:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
with:
@@ -19,11 +19,11 @@ jobs:
- name: Update Flux components
id: update
run: |
UGLY_VERSION="$(flux -v)"
VERSION="v${UGLY_VERSION#*flux version }"
CLI_VERSION="$(flux -v)"
VERSION="v${CLI_VERSION#*flux version }"
flux install --version="${VERSION}" \
--network-policy=false \
--export > ./cluster/flux-system/gotk-components.yaml
--export > ./cluster/base/flux-system/gotk-components.yaml
echo "::set-output name=flux_version::$VERSION"
- name: Create pull request for Flux update

2
.gitignore vendored
View File

@@ -1,5 +1,3 @@
# Secrets
*.clear
# Flux
flux
bin/

6
.sops.yaml Normal file
View File

@@ -0,0 +1,6 @@
---
creation_rules:
- encrypted_regex: "((?i)(pass|secret($|[^N])|key|token|^data$|^stringData))"
pgp: >-
$FLUX_KEY_FP,
$PERSONAL_KEY_FP

View File

@@ -23,7 +23,8 @@ spec:
pullPolicy: IfNotPresent
env:
SIGNUPS_ALLOWED: "false"
DOMAIN: "https://bitwarden.k3s.xpander.ovh/"
DOMAIN: "https://bitwarden.${SECRET_CLUSTER_DOMAIN}/"
DATABASE_URL: ${SECRET_BITWARDENRS_DB_URL}
bitwardenrs:
domain: ""
signupsAllowed: false
@@ -46,15 +47,12 @@ spec:
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: bitwarden.k3s.xpander.ovh
- host: bitwarden.${SECRET_CLUSTER_DOMAIN}
paths:
- /
tls:
- hosts:
- "bitwarden.k3s.xpander.ovh"
- "bitwarden.${SECRET_CLUSTER_DOMAIN}"
persistence:
enabled: false
existingClaim: bitwarden-config
valuesFrom:
- kind: ConfigMap
name: bitwardenrs-helmrelease
enabled: true
existingClaim: bitwardenrs-config

View File

@@ -1,6 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./gotk-components.yaml
- ./gotk-sync.yaml
- ./secrets.yaml
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: bitwardenrs-config
namespace: data
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 1Gi

View File

@@ -21,9 +21,13 @@ spec:
repository: ghcr.io/linuxserver/bookstack
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "v0.31.8-ls126"
tag: "version-v21.04"
env:
APP_URL: https://bookstack.k3s.xpander.ovh/
APP_URL: https://bookstack.${SECRET_CLUSTER_DOMAIN}/
DB_HOST: mariadb
DB_DATABASE: bookstack
DB_USERNAME: bookstack
DB_PASSWORD: ${SECRET_BOOKSTACK_DB_PASSWORD}
service:
port:
port: 80
@@ -36,18 +40,15 @@ spec:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0
hosts:
- host: bookstack.k3s.xpander.ovh
- host: bookstack.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "bookstack.k3s.xpander.ovh"
- "bookstack.${SECRET_CLUSTER_DOMAIN}"
persistence:
config:
enabled: true
mountPath: /config
existingClaim: bookstack-config
valuesFrom:
- kind: ConfigMap
name: bookstack-helmrelease

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: bookstack-config
namespace: data
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 1Gi

View File

@@ -3,7 +3,7 @@ apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: forecastle
namespace: kube-system
namespace: data
spec:
interval: 5m
chart:
@@ -23,18 +23,18 @@ spec:
namespaceSelector:
matchNames:
- data
- media
- home
- development
- home
- media
- network
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: forecastle.k3s.xpander.ovh
- host: home.${SECRET_CLUSTER_DOMAIN}
paths:
- /
tls:
- hosts:
- forecastle.k3s.xpander.ovh
- home.${SECRET_CLUSTER_DOMAIN}

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml

View File

@@ -27,7 +27,7 @@ spec:
env:
TZ: Europe/Paris
CRON_MIN: "18,48"
DOMAIN: "https://freshrss.k3s.xpander.ovh/"
DOMAIN: "https://freshrss.${SECRET_CLUSTER_DOMAIN}/"
service:
port:
port: 80
@@ -39,13 +39,13 @@ spec:
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: freshrss.k3s.xpander.ovh
- host: freshrss.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "freshrss.k3s.xpander.ovh"
- "freshrss.${SECRET_CLUSTER_DOMAIN}"
persistence:
config:
enabled: true

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: freshrss-config
namespace: data
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 1Gi

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- serviceaccount.yaml

View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: jobs
namespace: data
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: jobs-edit
namespace: data
subjects:
- kind: ServiceAccount
name: jobs
roleRef:
kind: ClusterRole
name: edit
apiGroup: rbac.authorization.k8s.io

View File

@@ -37,9 +37,14 @@ spec:
prometheus.io/protocol: tcp
env:
APP_BASE_URL: https://joplin.k3s.xpander.ovh
APP_BASE_URL: https://joplin.${SECRET_CLUSTER_DOMAIN}
APP_PORT: 22300
DB_CLIENT: pg
POSTGRES_HOST: postgresql
POSTGRES_PORT: 5432
POSTGRES_DATABASE: joplin
POSTGRES_USER: joplin
POSTGRES_PASSWORD: ${SECRET_JOPLIN_DB_PASSWORD}
nodeSelector: {}
@@ -54,14 +59,10 @@ spec:
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: joplin.k3s.xpander.ovh
- host: joplin.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "joplin.k3s.xpander.ovh"
valuesFrom:
- kind: ConfigMap
name: joplin-server-helmrelease
- "joplin.${SECRET_CLUSTER_DOMAIN}"

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml

View File

@@ -0,0 +1,17 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- bitwardenrs
- bookstack
- forecastle
- freshrss
- jobs
- joplin-server
- pgadmin
- pgbackups
- recipes
- resilio-sync
- searx
- sharry
- vikunja
- volumes

View File

@@ -3,7 +3,7 @@ apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: pgadmin
namespace: development
namespace: data
spec:
interval: 5m
chart:
@@ -21,8 +21,11 @@ spec:
type: Recreate
image:
repository: dpage/pgadmin4
tag: "5.1"
tag: 5.1
pullPolicy: IfNotPresent
env:
email: ${SECRET_PGADMIN_EMAIL}
password: ${SECRET_PGADMIN_PASSWORD}
service:
annotations:
prometheus.io/probe: "true"
@@ -33,15 +36,12 @@ spec:
kubernetes.io/ingress.class: "nginx"
prometheus.io/probe: "true"
hosts:
- host: pgadmin.k3s.xpander.ovh
- host: pgadmin.${SECRET_CLUSTER_DOMAIN}
paths:
- /
tls:
- hosts:
- pgadmin.k3s.xpander.ovh
- pgadmin.${SECRET_CLUSTER_DOMAIN}
persistentVolume:
enabled: true
existingClaim: pgadmin-config
valuesFrom:
- kind: ConfigMap
name: pgadmin-helmrelease

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pgadmin-config
namespace: data
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 1Gi

View File

@@ -24,25 +24,13 @@ spec:
image: prodrigestivill/postgres-backup-local:13
env:
- name: POSTGRES_HOST
valueFrom:
secretKeyRef:
name: pgbackups-config
key: POSTGRES_HOST
value: postgresql
- name: POSTGRES_DB
valueFrom:
secretKeyRef:
name: pgbackups-config
key: POSTGRES_DB
value: authelia,bitwarden,freshrss,gitea,hass,healthchecks,joplin,lychee,postgres,recipes,sharry,vikunja,wallabag
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: pgbackups-config
key: POSTGRES_USER
value: postgres
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: pgbackups-config
key: POSTGRES_PASSWORD
value: ${SECRET_POSTGRESQL_POSTGRES_PASSWORD}
- name: POSTGRES_EXTRA_OPTS
value: "-Z9 --schema=public --blobs"
- name: SCHEDULE
@@ -62,13 +50,13 @@ spec:
ports:
- containerPort: 8080
volumeMounts:
- name: nfs-backups
- name: nfs-backups-data
mountPath: /backups
subPath: postgresql
volumes:
- name: nfs-backups
- name: nfs-backups-data
persistentVolumeClaim:
claimName: nfs-backups
claimName: nfs-backups-data
dnsConfig:
options:
- name: ndots

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml

View File

@@ -25,9 +25,15 @@ spec:
tag: 0.15.1
pullPolicy: IfNotPresent
env:
SECRET_KEY: ${SECRET_POSTGRESQL_RECIPES_SECRET_KEY}
DEBUG: "0"
ALLOWED_HOSTS: "*"
DB_ENGINE: django.db.backends.postgresql
POSTGRES_HOST: postgresql
POSTGRES_PORT: 5432
POSTGRES_DB: recipes
POSTGRES_USER: recipes
POSTGRES_PASSWORD: ${SECRET_POSTGRESQL_RECIPES_DB_PASSWORD}
GUNICORN_MEDIA: "0"
TIMEZONE: Europe/Paris
FRACTION_PREF_DEFAULT: "0"
@@ -45,13 +51,13 @@ spec:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/client-body-buffer-size: "10m"
hosts:
- host: recipes.k3s.xpander.ovh
- host: recipes.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "recipes.k3s.xpander.ovh"
- "recipes.${SECRET_CLUSTER_DOMAIN}"
persistence:
config:
enabled: false
@@ -65,7 +71,7 @@ spec:
existingClaim: recipes-static
additionalContainers:
- name: nginx
image: nginx:1.19.6
image: nginx:1.19.10
ports:
- containerPort: 80
name: http
@@ -78,6 +84,3 @@ spec:
mountPath: /etc/nginx/nginx.conf
subPath: nginx-config
readOnly: true
valuesFrom:
- kind: ConfigMap
name: recipes-helmrelease

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volumes.yaml

View File

@@ -0,0 +1,26 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: recipes-media
namespace: data
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 5Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: recipes-static
namespace: data
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 1Gi

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- statefulset.yaml
- volumes.yaml

View File

@@ -24,7 +24,7 @@ spec:
app.kubernetes.io/name: resilio-sync
spec:
containers:
- image: linuxserver/resilio-sync:amd64-2.7.2.1375-ls82
- image: linuxserver/resilio-sync:amd64-2.7.2.1375-ls86
name: resilio-sync-claude
env:
- name: TZ
@@ -44,20 +44,20 @@ spec:
- name: sync-conf-claude
mountPath: /config/sync.conf
subPath: sync.conf
- name: home-claude
- name: home-claude-data
mountPath: /sync/home/claude
- name: nfs-photo
- name: nfs-photo-data
mountPath: /sync/photo
- name: nfs-backups
- name: nfs-backups-data
mountPath: /sync/backup
- name: nfs-music
- name: nfs-music-data
mountPath: /sync/music
- name: nfs-video
- name: nfs-video-data
mountPath: /sync/video
subPath: video
- name: nfs-shared-documents
- name: nfs-shared-documents-data
mountPath: /sync/shared-documents
- image: linuxserver/resilio-sync:amd64-2.7.2.1375-ls82
- image: linuxserver/resilio-sync:amd64-2.7.2.1375-ls86
name: resilio-sync-helene
env:
- name: TZ
@@ -77,9 +77,9 @@ spec:
- name: sync-conf-helene
mountPath: /config/sync.conf
subPath: sync.conf
- name: home-helene
- name: home-helene-data
mountPath: /sync/home
- name: nfs-backups
- name: nfs-backups-data
mountPath: /sync/backup
volumes:
- name: sync-conf-claude
@@ -94,27 +94,27 @@ spec:
- name: config-helene
persistentVolumeClaim:
claimName: resilio-sync-config-helene
- name: home-claude
- name: home-claude-data
persistentVolumeClaim:
claimName: nfs-home-claude
- name: home-helene
claimName: nfs-home-claude-data
- name: home-helene-data
persistentVolumeClaim:
claimName: nfs-home-helene
- name: nfs-backups
claimName: nfs-home-helene-data
- name: nfs-backups-data
persistentVolumeClaim:
claimName: nfs-backups
- name: nfs-photo
claimName: nfs-backups-data
- name: nfs-photo-data
persistentVolumeClaim:
claimName: nfs-photo-data
- name: nfs-music
- name: nfs-music-data
persistentVolumeClaim:
claimName: nfs-music-data
- name: nfs-video
- name: nfs-video-data
persistentVolumeClaim:
claimName: nfs-video-data
- name: nfs-shared-documents
- name: nfs-shared-documents-data
persistentVolumeClaim:
claimName: nfs-shared-documents
claimName: nfs-shared-documents-data
dnsConfig:
options:
- name: ndots
@@ -167,32 +167,6 @@ data:
}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: resilio-sync-config-claude
namespace: data
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: resilio-sync-config-helene
namespace: data
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
annotations:
@@ -226,7 +200,7 @@ spec:
app.kubernetes.io/name: resilio-sync
type: LoadBalancer
externalIPs:
- 192.168.169.106
- ${CLUSTER_LB_RESILIOSYNC}
externalTrafficPolicy: Local
---
apiVersion: networking.k8s.io/v1
@@ -241,7 +215,7 @@ metadata:
namespace: data
spec:
rules:
- host: resilio-sync-claude.k3s.xpander.ovh
- host: resilio-sync-claude.${SECRET_CLUSTER_DOMAIN}
http:
paths:
- path: /
@@ -251,7 +225,7 @@ spec:
name: resilio-sync
port:
number: 8888
- host: resilio-sync-helene.k3s.xpander.ovh
- host: resilio-sync-helene.${SECRET_CLUSTER_DOMAIN}
http:
paths:
- path: /
@@ -263,5 +237,5 @@ spec:
number: 8889
tls:
- hosts:
- resilio-sync-claude.k3s.xpander.ovh
- resilio-sync-helene.k3s.xpander.ovh
- resilio-sync-claude.${SECRET_CLUSTER_DOMAIN}
- resilio-sync-helene.${SECRET_CLUSTER_DOMAIN}

View File

@@ -0,0 +1,26 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: resilio-sync-config-claude
namespace: data
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: resilio-sync-config-helene
namespace: data
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 1Gi

View File

@@ -29,21 +29,19 @@ spec:
prometheus.io/probe: "true"
prometheus.io/protocol: http
searx:
baseUrl: https://searx.k3s.xpander.ovh
baseUrl: https://searx.${SECRET_CLUSTER_DOMAIN}
mortyKey: ${SECRET_SEARX_MORTY_KEY}
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.networking.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.${SECRET_CLUSTER_DOMAIN}/"
hosts:
- host: searx.k3s.xpander.ovh
- host: searx.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "searx.k3s.xpander.ovh"
valuesFrom:
- kind: ConfigMap
name: searx-helmrelease
- "searx.${SECRET_CLUSTER_DOMAIN}"

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml

View File

@@ -40,7 +40,10 @@ spec:
- "/opt/sharry.conf"
sharry:
baseUrl: "https://sharry.k3s.xpander.ovh"
baseUrl: "https://sharry.${SECRET_CLUSTER_DOMAIN}"
jdbcUrl: ${SECRET_SHARRY_JDBC_URL}
jdbcUser: sharry
jdbcPassword: ${SECRET_SHARRY_DB_PASSWORD}
chunkSize: "512K"
maxSize: "1.5G"
maxValidity: "31 days"
@@ -72,14 +75,10 @@ spec:
nginx.ingress.kubernetes.io/proxy-body-size: "2048m"
nginx.ingress.kubernetes.io/proxy-buffering: "off"
hosts:
- host: sharry.k3s.xpander.ovh
- host: sharry.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "sharry.k3s.xpander.ovh"
valuesFrom:
- kind: ConfigMap
name: sharry-helmrelease
- "sharry.${SECRET_CLUSTER_DOMAIN}"

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml

View File

@@ -44,6 +44,30 @@ spec:
podAnnotations: {}
additionalContainers:
- name: api
image: vikunja/api:latest
imagePullPolicy: Always
env:
- name: VIKUNJA_DATABASE_TYPE
value: "postgres"
- name: VIKUNJA_SERVICE_JWTSECRET
value: ${SECRET_VIKUNJA_JWT_SECRET}
- name: VIKUNJA_DATABASE_HOST
value: postgresql
- name: VIKUNJA_DATABASE_DATABASE
value: vikunja
- name: VIKUNJA_DATABASE_USER
value: vikunja
- name: VIKUNJA_DATABASE_PASSWORD
value: ${SECRET_VIKUNJA_PASSWORD}
volumeMounts:
- name: files
mountPath: /app/vikunja/files
- name: frontend
image: vikunja/frontend:latest
imagePullPolicy: Always
additionalVolumes:
- name: vikunja-config
configMap:
@@ -57,14 +81,10 @@ spec:
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: vikunja.k3s.xpander.ovh
- host: vikunja.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "vikunja.k3s.xpander.ovh"
valuesFrom:
- kind: ConfigMap
name: vikunja-helmrelease
- "vikunja.${SECRET_CLUSTER_DOMAIN}"

View File

@@ -1,21 +1,3 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: jobs
namespace: data
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: jobs-edit
namespace: data
subjects:
- kind: ServiceAccount
name: jobs
roleRef:
kind: ClusterRole
name: edit
apiGroup: rbac.authorization.k8s.io
---
apiVersion: batch/v1beta1
kind: CronJob

View File

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- job.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: vikunja-files
namespace: data
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 1Gi

View File

@@ -0,0 +1,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- nfs-backups-data.yaml
- nfs-home-claude-data.yaml
- nfs-home-helene-data.yaml
- nfs-music-data.yaml
- nfs-photo-data.yaml
- nfs-video-data.yaml
- nfs-shared-documents-data.yaml

View File

@@ -0,0 +1,37 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: nfs-backups-data
namespace: data
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-backups-data
namespace: data
spec:
storageClassName: nfs-backups-data
capacity:
storage: 1Mi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
nfs:
server: truenas
path: "/mnt/storage/backups"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backups-data
namespace: data
spec:
accessModes:
- ReadWriteMany
storageClassName: nfs-backups-data
resources:
requests:
storage: 1Mi

View File

@@ -0,0 +1,37 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: nfs-home-claude-data
namespace: data
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-home-claude-data
namespace: data
spec:
storageClassName: nfs-home-claude-data
capacity:
storage: 1Mi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
nfs:
server: truenas
path: "/mnt/storage/home/claude"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-home-claude-data
namespace: data
spec:
accessModes:
- ReadWriteMany
storageClassName: nfs-home-claude-data
resources:
requests:
storage: 1Mi

View File

@@ -0,0 +1,37 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: nfs-home-helene-data
namespace: data
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-home-helene-data
namespace: data
spec:
storageClassName: nfs-home-helene-data
capacity:
storage: 1Mi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
nfs:
server: truenas
path: "/mnt/storage/home/helene"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-home-helene-data
namespace: data
spec:
accessModes:
- ReadWriteMany
storageClassName: nfs-home-helene-data
resources:
requests:
storage: 1Mi

View File

@@ -0,0 +1,37 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: nfs-music-data
namespace: data
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-music-data
namespace: data
spec:
storageClassName: nfs-music-data
capacity:
storage: 1Mi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
nfs:
server: truenas
path: "/mnt/storage/music"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-music-data
namespace: data
spec:
accessModes:
- ReadWriteMany
storageClassName: nfs-music-data
resources:
requests:
storage: 1Mi

View File

@@ -0,0 +1,37 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: nfs-photo-data
namespace: data
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-photo-data
namespace: data
spec:
storageClassName: nfs-photo-data
capacity:
storage: 1Mi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
nfs:
server: truenas
path: "/mnt/storage/photo"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-photo-data
namespace: data
spec:
accessModes:
- ReadWriteMany
storageClassName: nfs-photo-data
resources:
requests:
storage: 1Mi

View File

@@ -0,0 +1,37 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: nfs-shared-documents-data
namespace: data
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-shared-documents-data
namespace: data
spec:
storageClassName: nfs-shared-documents-data
capacity:
storage: 1Mi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
nfs:
server: truenas
path: "/mnt/storage/shared-documents"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-shared-documents-data
namespace: data
spec:
accessModes:
- ReadWriteMany
storageClassName: nfs-shared-documents-data
resources:
requests:
storage: 1Mi

View File

@@ -0,0 +1,37 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: nfs-video-data
namespace: data
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-video-data
namespace: data
spec:
storageClassName: nfs-video-data
capacity:
storage: 1Mi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
nfs:
server: truenas
path: "/mnt/storage/video"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-video-data
namespace: data
spec:
accessModes:
- ReadWriteMany
storageClassName: nfs-video-data
resources:
requests:
storage: 1Mi

View File

@@ -20,9 +20,15 @@ spec:
storage: s3
s3:
region: "us-east-1"
regionEndpoint: ${SECRET_MINIO_ENDPOINT}
bucket: docker-registry
encrypt: false
secure: true
secrets:
htpasswd: ${SECRET_DOCKER_REGISTRY_HTPASSWD}
s3:
accessKey: ${SECRET_MINIO_ACCESS_KEY}
secretKey: ${SECRET_MINIO_SECRET_KEY}
ingress:
enabled: true
annotations:
@@ -31,15 +37,11 @@ spec:
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
hosts:
- registry.k3s.xpander.ovh
- registry.${SECRET_CLUSTER_DOMAIN}
tls:
- hosts:
- registry.k3s.xpander.ovh
- registry.${SECRET_CLUSTER_DOMAIN}
service:
annotations:
prometheus.io/probe: "true"
prometheus.io/protocol: http
valuesFrom:
- kind: ConfigMap
name: docker-registry-helmrelease
optional: false

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml

View File

@@ -21,7 +21,4 @@ spec:
enabled: false
env:
KUBERNETES_NAMESPACE: development
valuesFrom:
- kind: ConfigMap
name: drone-kubernetes-secrets-helmrelease
optional: false
SECRET_KEY: ${SECRET_DRONE_PLUGIN_TOKEN}

View File

@@ -19,16 +19,14 @@ spec:
values:
image:
repository: drone/drone-runner-kube
tag: 1.0.0-beta.5
tag: 1.0.0-beta.6
env:
DRONE_RPC_SECRET: ${SECRET_DRONE_RPC_SECRET}
DRONE_RPC_PROTO: http
DRONE_SECRET_PLUGIN_TOKEN: ${SECRET_DRONE_PLUGIN_TOKEN}
DRONE_NAMESPACE_DEFAULT: development
DRONE_RPC_HOST: drone.development.svc.cluster.local
DRONE_RPC_HOST: drone
DRONE_SECRET_PLUGIN_ENDPOINT: http://drone-kubernetes-secrets:3000
rbac:
buildNamespaces:
- development
valuesFrom:
- kind: ConfigMap
name: drone-runner-kube-helmrelease
optional: false

View File

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- drone-runner-kube-secrets-helm-release.yaml
- helm-release.yaml
- rbac.yaml

View File

@@ -26,25 +26,25 @@ spec:
enabled: true
existingClaim: drone-config
env:
DRONE_DATABASE_SECRET: ${SECRET_DRONE_DATABASE_SECRET}
DRONE_GITEA_CLIENT_ID: ${SECRET_DRONE_GITEA_CLIENT_ID}
DRONE_GITEA_CLIENT_SECRET: ${SECRET_DRONE_GITEA_CLIENT_SECRET}
DRONE_RPC_SECRET: ${SECRET_DRONE_RPC_SECRET}
DRONE_SERVER_PROTO: https
DRONE_GIT_ALWAYS_AUTH: true
DRONE_LOGS_TEXT: true
DRONE_LOGS_PRETTY: true
DRONE_LOGS_COLOR: true
DRONE_SERVER_HOST: drone.k3s.xpander.ovh
DRONE_GITEA_SERVER: https://gitea.k3s.xpander.ovh
DRONE_SERVER_HOST: drone.${SECRET_CLUSTER_DOMAIN}
DRONE_GITEA_SERVER: https://gitea.${SECRET_CLUSTER_DOMAIN}
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
prometheus.io/probe: "true"
hosts:
- host: drone.k3s.xpander.ovh
- host: drone.${SECRET_CLUSTER_DOMAIN}
paths: ["/"]
tls:
- hosts:
- drone.k3s.xpander.ovh
valuesFrom:
- kind: ConfigMap
name: drone-helmrelease
optional: false
- drone.${SECRET_CLUSTER_DOMAIN}

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: drone-config
namespace: development
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 10Gi

View File

@@ -28,10 +28,10 @@ spec:
nginx.ingress.kubernetes.io/proxy-body-size: 128m
prometheus.io/probe: "true"
hosts:
- "gitea.k3s.xpander.ovh"
- "gitea.${SECRET_CLUSTER_DOMAIN}"
tls:
- hosts:
- "gitea.k3s.xpander.ovh"
- "gitea.${SECRET_CLUSTER_DOMAIN}"
service:
http:
port: 3000
@@ -43,12 +43,21 @@ spec:
port: 22
externalTrafficPolicy: Local
externalIPs:
- 192.168.169.104
- ${CLUSTER_LB_GITEA}
gitea:
admin:
email: ${SECRET_GITEA_ADMIN_EMAIL}
username: auricom
password: ${SECRET_GITEA_ADMIN_PASSWORD}
config:
APP_NAME: "Homelab Gitea"
database:
DB_TYPE: postgres
HOST: postgresql:5432
NAME: gitea
USER: gitea
PASSWD: ${SECRET_GITEA_DB_PASSWORD}
SCHEMA: gitea
server:
SSH_PORT: 22
SSH_LISTEN_PORT: 22
@@ -76,6 +85,3 @@ spec:
tag: 1.6.9
service:
port: 11211
valuesFrom:
- kind: ConfigMap
name: gitea-helmrelease

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: gitea-config
namespace: development
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 15Gi

View File

@@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- docker-registry
- drone
- drone-runner-kube
- gitea

View File

@@ -30,16 +30,16 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.networking.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.${SECRET_CLUSTER_DOMAIN}/"
hosts:
- host: esphome.k3s.xpander.ovh
- host: esphome.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "esphome.k3s.xpander.ovh"
- "esphome.${SECRET_CLUSTER_DOMAIN}"
service:
annotations:
prometheus.io/probe: "true"

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the esphome app: aggregates its manifests.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
# Longhorn-backed PVC holding ESPHome's configuration data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: esphome-config
  namespace: home
spec:
  accessModes:
    - ReadWriteOnce
  # storage class with backup policy (cluster-specific)
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 10Gi

View File

@@ -23,24 +23,35 @@ spec:
tag: 2021.4.4
env:
TZ: "Europe/Paris"
HASS_SECRET_URL: https://hass.${SECRET_CLUSTER_DOMAIN}
HASS_SECRET_LATITUDE: ${SECRET_HASS_LATITUDE}
HASS_SECRET_LONGITUDE: ${SECRET_HASS_LONGITUDE}
HASS_SECRET_ELEVATION: ${SECRET_HASS_ELEVATION}
HASS_SECRET_MQTT_USERNAME: ${SECRET_MQTT_USERNAME}
HASS_SECRET_MQTT_PASSWORD: ${SECRET_MQTT_PASSWORD}
HASS_SECRET_DB_URL: ${SECRET_HASS_DB_URL}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
service:
annotations:
prometheus.io/probe: "true"
prometheus.io/protocol: http
type: LoadBalancer
externalIPs:
- ${CLUSTER_LB_HASS}
externalTrafficPolicy: Local
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: hass.k3s.xpander.ovh
- host: hass.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "hass.k3s.xpander.ovh"
- "hass.${SECRET_CLUSTER_DOMAIN}"
prometheus:
serviceMonitor:
enabled: false
@@ -74,13 +85,13 @@ spec:
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: hass-config.k3s.xpander.ovh
- host: hass-config.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- hass-config.k3s.xpander.ovh
- hass-config.${SECRET_CLUSTER_DOMAIN}
volumeMounts:
- name: config
mountPath: /config
@@ -88,10 +99,5 @@ spec:
requests:
cpu: 500m
memory: 1000Mi
limits:
memory: 2500Mi
postgresql:
enabled: false
valuesFrom:
- kind: ConfigMap
name: hass-helmrelease

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the home-assistant app: aggregates its manifests.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
# Longhorn-backed PVC holding Home Assistant's configuration data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: hass-config
  namespace: home
spec:
  accessModes:
    - ReadWriteOnce
  # storage class with backup policy (cluster-specific)
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 10Gi

View File

@@ -0,0 +1,9 @@
# Aggregates all apps deployed in the home namespace.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- esphome
- home-assistant
- node-red
- vernemq
- zigbee2mqtt
- zwavejs2mqtt

View File

@@ -38,13 +38,13 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.networking.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.${SECRET_CLUSTER_DOMAIN}/"
hosts:
- host: node-red.k3s.xpander.ovh
- host: node-red.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- node-red.k3s.xpander.ovh
- node-red.${SECRET_CLUSTER_DOMAIN}

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the node-red app: aggregates its manifests.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
# Longhorn-backed PVC holding Node-RED's configuration data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: node-red-config
  namespace: home
spec:
  accessModes:
    - ReadWriteOnce
  # storage class with backup policy (cluster-specific)
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 10Gi

View File

@@ -30,7 +30,7 @@ spec:
prometheus.io/protocol: tcp
type: LoadBalancer
externalIPs:
- 192.168.169.111
- ${CLUSTER_LB_VERNEMQ}
ws:
enabled: true
nodePort: 32080

View File

@@ -0,0 +1,4 @@
# Kustomize entrypoint for the vernemq app (HelmRelease only, no volume).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml

View File

@@ -0,0 +1,95 @@
---
# Flux HelmRelease deploying zigbee2mqtt (Zigbee-to-MQTT bridge) into the
# `home` namespace from the k8s-at-home Helm chart repository.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: zigbee2mqtt
  namespace: home
spec:
  interval: 5m
  chart:
    spec:
      # renovate: registryUrl=https://k8s-at-home.com/charts/
      chart: zigbee2mqtt
      version: 7.2.0
      sourceRef:
        kind: HelmRepository
        name: k8s-at-home-charts
        namespace: flux-system
      interval: 5m
  values:
    image:
      repository: koenkk/zigbee2mqtt
      tag: 1.18.2
    env:
      TZ: Europe/Paris
      ZIGBEE2MQTT_DATA: /data
    config:
      homeassistant: true
      device_options:
        retain: true
      # joining disabled by default; toggle via frontend when pairing devices
      permit_join: false
      mqtt:
        base_topic: zigbee2mqtt
        # broker deployed by the sibling vernemq HelmRelease in this namespace
        server: "mqtt://vernemq"
        user: "${SECRET_MQTT_USERNAME}"
        password: "${SECRET_MQTT_PASSWORD}"
      serial:
        # Zigbee coordinator exposed on the host as a serial device
        # (mounted below via hostPath).
        port: /dev/ttyUSB0
      advanced:
        log_output:
          - console
        # NOTE(review): Zigbee network key is committed in plaintext here.
        # Consider moving it to a substituted secret (e.g. a
        # ${SECRET_...} variable) like the MQTT credentials above.
        network_key:
          - 204
          - 61
          - 75
          - 23
          - 44
          - 230
          - 24
          - 203
          - 53
          - 5
          - 248
          - 32
          - 50
          - 84
          - 44
          - 159
      frontend:
        port: 8080
      experimental:
        new_api: true
    # privileged access is needed for the /dev/ttyUSB0 device mount below
    securityContext:
      privileged: true
    ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: "nginx"
        # gate the UI behind Authelia forward-auth
        nginx.ingress.kubernetes.io/auth-url: "http://authelia.networking.svc.cluster.local/api/verify"
        nginx.ingress.kubernetes.io/auth-signin: "https://login.${SECRET_CLUSTER_DOMAIN}/"
      hosts:
        - host: "zigbee.${SECRET_CLUSTER_DOMAIN}"
          paths:
            - path: /
              pathType: Prefix
      tls:
        - hosts:
            - "zigbee.${SECRET_CLUSTER_DOMAIN}"
    persistence:
      config:
        enabled: true
        existingClaim: zigbee2mqtt-config
        mountPath: "/data"
    # pass the coordinator device from the host into the container
    additionalVolumeMounts:
      - name: usb
        mountPath: /dev/ttyUSB0
    additionalVolumes:
      - name: usb
        hostPath:
          path: /dev/ttyUSB0
    # pin the pod to the node that physically hosts the Zigbee adapter
    # (labelled via node-feature-discovery custom rule)
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - {key: "feature.node.kubernetes.io/custom-zigbee", operator: In, values: ["true"]}

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the zigbee2mqtt app: aggregates its manifests.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
# Longhorn-backed PVC holding zigbee2mqtt's data directory.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zigbee2mqtt-config
  namespace: home
spec:
  accessModes:
    - ReadWriteOnce
  # storage class with backup policy (cluster-specific)
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 10Gi

View File

@@ -29,16 +29,16 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.networking.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.${SECRET_CLUSTER_DOMAIN}/"
hosts:
- host: zwave.k3s.xpander.ovh
- host: zwave.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- zwave.k3s.xpander.ovh
- zwave.${SECRET_CLUSTER_DOMAIN}
service:
annotations:
prometheus.io/probe: "true"

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the zwavejs2mqtt app: aggregates its manifests.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
# Longhorn-backed PVC holding zwavejs2mqtt's configuration data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zwavejs2mqtt-config
  namespace: home
spec:
  accessModes:
    - ReadWriteOnce
  # storage class with backup policy (cluster-specific)
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 10Gi

View File

@@ -0,0 +1,10 @@
# Top-level aggregation of all app namespaces in the cluster.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- data
- development
- home
- media
- monitoring
- networking
- system-upgrade

View File

@@ -33,12 +33,12 @@ spec:
enabled: true
existingClaim: bazarr-config
additionalVolumeMounts:
- name: nfs-video
- name: nfs-video-media
mountPath: "/mnt/storage/video"
additionalVolumes:
- name: nfs-video
- name: nfs-video-media
persistentVolumeClaim:
claimName: nfs-video
claimName: nfs-video-media
service:
annotations:
prometheus.io/probe: "true"
@@ -53,17 +53,17 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.networking.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.${SECRET_CLUSTER_DOMAIN}/"
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Accept-Encoding "";
sub_filter '</head>' '<link rel="stylesheet" type="text/css" href="https://gilbn.github.io/theme.park/CSS/themes/bazarr/space-gray.css"></head>';
sub_filter_once on;
hosts:
- host: bazarr.k3s.xpander.ovh
- host: bazarr.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "bazarr.k3s.xpander.ovh"
- "bazarr.${SECRET_CLUSTER_DOMAIN}"

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the bazarr app: aggregates its manifests.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
# Longhorn-backed PVC holding Bazarr's configuration data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: bazarr-config
  namespace: media
spec:
  accessModes:
    - ReadWriteOnce
  # storage class with backup policy (cluster-specific)
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 20Gi

View File

@@ -26,6 +26,8 @@ spec:
FLOOD_OPTION_RUNDIR: /data
FLOOD_OPTION_AUTH: "none"
FLOOD_OPTION_QBURL: "http://qbittorrent:8080"
FLOOD_OPTION_QBUSER: admin
FLOOD_OPTION_QBPASS: ${SECRET_QBITTORRENT_PASSWORD}
podSecurityContext:
runAsUser: 1001
runAsGroup: 1001
@@ -34,16 +36,16 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.networking.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.${SECRET_CLUSTER_DOMAIN}/"
hosts:
- host: flood.k3s.xpander.ovh
- host: flood.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "flood.k3s.xpander.ovh"
- "flood.${SECRET_CLUSTER_DOMAIN}"
persistence:
data:
enabled: true
@@ -58,6 +60,3 @@ spec:
annotations:
prometheus.io/probe: "true"
prometheus.io/protocol: http
valuesFrom:
- kind: ConfigMap
name: flood-helmrelease

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the flood app: aggregates its manifests.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
# Longhorn-backed PVC holding Flood's configuration data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: flood-config
  namespace: media
spec:
  accessModes:
    - ReadWriteOnce
  # storage class with backup policy (cluster-specific)
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 1Gi

View File

@@ -46,13 +46,13 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.networking.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.${SECRET_CLUSTER_DOMAIN}/"
hosts:
- host: jackett.k3s.xpander.ovh
- host: jackett.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "jackett.k3s.xpander.ovh"
- "jackett.${SECRET_CLUSTER_DOMAIN}"

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the jackett app: aggregates its manifests.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
# Longhorn-backed PVC holding Jackett's configuration data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jackett-config
  namespace: media
spec:
  accessModes:
    - ReadWriteOnce
  # storage class with backup policy (cluster-specific)
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 1Gi

View File

@@ -34,22 +34,22 @@ spec:
enabled: true
existingClaim: jellyfin-config
additionalVolumeMounts:
- name: nfs-music
- name: nfs-music-media
mountPath: "/mnt/storage/music"
- name: nfs-video
- name: nfs-video-media
mountPath: "/mnt/storage/video"
- name: nfs-photo
- name: nfs-photo-media
mountPath: "/mnt/storage/photo"
additionalVolumes:
- name: nfs-music
- name: nfs-music-media
persistentVolumeClaim:
claimName: nfs-music
- name: nfs-video
claimName: nfs-music-media
- name: nfs-video-media
persistentVolumeClaim:
claimName: nfs-video
- name: nfs-photo
claimName: nfs-video-media
- name: nfs-photo-media
persistentVolumeClaim:
claimName: nfs-photo
claimName: nfs-photo-media
resources:
requests:
memory: 4Gi
@@ -63,10 +63,10 @@ spec:
forecastle.stakater.com/expose: "true"
forecastle.stakater.com/icon: "https://features.jellyfin.org/images/logos/a7Lx9nYDzWuDR94Az8Yum7neWMvNMndkm9qr4QVtmjaMrOHDLisS5K7LJctTRzK9-icon-transparent.png?size=200"
hosts:
- host: jellyfin.k3s.xpander.ovh
- host: jellyfin.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "jellyfin.k3s.xpander.ovh"
- "jellyfin.${SECRET_CLUSTER_DOMAIN}"

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the jellyfin app: aggregates its manifests.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
# Longhorn-backed PVC holding Jellyfin's configuration data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jellyfin-config
  namespace: media
spec:
  accessModes:
    - ReadWriteOnce
  # storage class with backup policy (cluster-specific)
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 20Gi

View File

@@ -0,0 +1,4 @@
# Kustomize entrypoint for shared job RBAC in the media namespace.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- serviceaccount.yaml

View File

@@ -0,0 +1,18 @@
---
# ServiceAccount used by jobs running in the media namespace.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jobs
  namespace: media
---
# Grants the built-in `edit` ClusterRole to the jobs ServiceAccount,
# scoped to the media namespace via a RoleBinding.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: jobs-edit
  namespace: media
subjects:
- kind: ServiceAccount
  name: jobs
  # namespace is required for ServiceAccount subjects; without it the
  # API server rejects the binding at validation time.
  namespace: media
roleRef:
  kind: ClusterRole
  name: edit
  apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,19 @@
# Aggregates all apps deployed in the media namespace.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- bazarr
- flood
- jackett
- jellyfin
- jobs
- lidarr
- lychee
- navidrome
- prowlarr
- pyload
- qbittorrent
- radarr
- sonarr
- tdarr
- travelstories
- volumes

View File

@@ -33,14 +33,14 @@ spec:
enabled: true
existingClaim: lidarr-config
additionalVolumeMounts:
- name: nfs-music
- name: nfs-music-media
mountPath: "/mnt/storage/music"
- name: qbittorrent-cache
mountPath: "/downloads"
additionalVolumes:
- name: nfs-music
- name: nfs-music-media
persistentVolumeClaim:
claimName: nfs-music
claimName: nfs-music-media
- name: qbittorrent-cache
persistentVolumeClaim:
claimName: qbittorrent-cache
@@ -58,17 +58,17 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.networking.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.${SECRET_CLUSTER_DOMAIN}/"
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Accept-Encoding "";
sub_filter '</head>' '<link rel="stylesheet" type="text/css" href="https://gilbn.github.io/theme.park/CSS/themes/lidarr/space-gray.css"></head>';
sub_filter_once on;
hosts:
- host: lidarr.k3s.xpander.ovh
- host: lidarr.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "lidarr.k3s.xpander.ovh"
- "lidarr.${SECRET_CLUSTER_DOMAIN}"

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the lidarr app: aggregates its manifests.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -0,0 +1,13 @@
---
# Longhorn-backed PVC holding Lidarr's configuration data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: lidarr-config
  namespace: media
spec:
  accessModes:
    - ReadWriteOnce
  # storage class with backup policy (cluster-specific)
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 20Gi

View File

@@ -34,16 +34,21 @@ spec:
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: lychee.k3s.xpander.ovh
- host: lychee.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "lychee.k3s.xpander.ovh"
- "lychee.${SECRET_CLUSTER_DOMAIN}"
env:
PHP_TZ: Europe/Paris
DB_CONNECTION: pgsql
DB_HOST: postgresql
DB_PORT: 5432
DB_DATABASE: lychee
DB_USERNAME: lychee
DB_PASSWORD: ${SECRET_LYCHEE_DB_PASSWORD}
persistence:
config:
enabled: true
@@ -60,7 +65,4 @@ spec:
photo:
enabled: true
mountPath: /mnt/storage/photo
existingClaim: nfs-photo
valuesFrom:
- kind: ConfigMap
name: lychee-helmrelease
existingClaim: nfs-photo-media

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the lychee app: aggregates its manifests
# (note: multiple PVCs live in volumes.yaml, not volume.yaml).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volumes.yaml

View File

@@ -2,38 +2,38 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: drone-config
namespace: development
name: lychee-config
namespace: media
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 10Gi
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: gitea-config
namespace: development
name: lychee-uploads
namespace: media
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 15Gi
storage: 50Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pgadmin-config
namespace: development
name: lychee-sym
namespace: media
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 5Gi
storage: 1Gi

Some files were not shown because too many files have changed in this diff Show More