new gitops template

This commit is contained in:
auricom
2021-04-13 10:34:08 +02:00
parent 67c4d6a855
commit a95f32b44d
335 changed files with 3131 additions and 3650 deletions

View File

@@ -0,0 +1,58 @@
---
# Flux HelmRelease: bitwarden_rs (vaultwarden) password manager, deployed
# from the k8s-at-home chart into the "data" namespace.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: bitwardenrs
  namespace: data
spec:
  interval: 5m
  chart:
    spec:
      # renovate: registryUrl=https://k8s-at-home.com/charts/
      chart: bitwardenrs
      version: 2.1.5
      sourceRef:
        kind: HelmRepository
        name: k8s-at-home-charts
        namespace: flux-system
      interval: 5m
  values:
    image:
      repository: bitwardenrs/server
      tag: 1.20.0
      pullPolicy: IfNotPresent
    env:
      # Registrations disabled; accounts are created via the admin page.
      SIGNUPS_ALLOWED: "false"
      # ${SECRET_...} placeholders are substituted by Flux variable substitution.
      DOMAIN: "https://bitwarden.${SECRET_CLUSTER_DOMAIN}/"
      DATABASE_URL: ${SECRET_BITWARDENRS_DB_URL}
    bitwardenrs:
      domain: ""
      signupsAllowed: false
      websockets:
        enabled: false
      admin:
        enabled: true
        disableAdminToken: false
        existingSecret:
          enabled: false
          name: ""
          tokenKey: ""
    service:
      port: 80
      annotations:
        # Scraped/probed by the cluster blackbox exporter.
        prometheus.io/probe: "true"
        prometheus.io/protocol: http
    ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: "nginx"
      hosts:
        - host: bitwarden.${SECRET_CLUSTER_DOMAIN}
          paths:
            - /
      tls:
        - hosts:
            - "bitwarden.${SECRET_CLUSTER_DOMAIN}"
    persistence:
      enabled: true
      # PVC declared in volume.yaml alongside this release.
      existingClaim: bitwardenrs-config

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the bitwardenrs app (HelmRelease + PVC).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helm-release.yaml
  - volume.yaml

View File

@@ -0,0 +1,13 @@
---
# Persistent config/attachment storage for bitwardenrs, on the
# Longhorn storage class that is included in backups.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: bitwardenrs-config
  namespace: data
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 1Gi

View File

@@ -0,0 +1,54 @@
---
# Flux HelmRelease: BookStack wiki, backed by the shared MariaDB instance.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: bookstack
  namespace: data
spec:
  interval: 5m
  chart:
    spec:
      # renovate: registryUrl=https://nicholaswilde.github.io/helm-charts/
      chart: bookstack
      version: 0.1.16
      sourceRef:
        kind: HelmRepository
        name: nicholaswilde-charts
        namespace: flux-system
      interval: 5m
  values:
    image:
      repository: ghcr.io/linuxserver/bookstack
      pullPolicy: IfNotPresent
      # Overrides the image tag whose default is the chart appVersion.
      tag: "version-v21.04"
    env:
      APP_URL: https://bookstack.${SECRET_CLUSTER_DOMAIN}/
      DB_HOST: mariadb
      DB_DATABASE: bookstack
      DB_USERNAME: bookstack
      DB_PASSWORD: ${SECRET_BOOKSTACK_DB_PASSWORD}
    service:
      port:
        port: 80
      annotations:
        prometheus.io/probe: "true"
        prometheus.io/protocol: http
    ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: "nginx"
        # NOTE(review): 0.0.0.0/0 whitelists every source address, i.e. it is
        # a no-op restriction — confirm whether a narrower CIDR was intended.
        nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0
      hosts:
        - host: bookstack.${SECRET_CLUSTER_DOMAIN}
          paths:
            - path: /
              pathType: Prefix
      tls:
        - hosts:
            - "bookstack.${SECRET_CLUSTER_DOMAIN}"
    persistence:
      config:
        enabled: true
        mountPath: /config
        existingClaim: bookstack-config

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the bookstack app (HelmRelease + PVC).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helm-release.yaml
  - volume.yaml

View File

@@ -0,0 +1,13 @@
---
# Persistent /config storage for BookStack (backed-up Longhorn class).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: bookstack-config
  namespace: data
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 1Gi

View File

@@ -0,0 +1,40 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: forecastle
namespace: data
spec:
interval: 5m
chart:
spec:
# renovate: registryUrl=https://stakater.github.io/stakater-charts
chart: forecastle
version: v1.0.61
sourceRef:
kind: HelmRepository
name: stakater-charts
namespace: flux-system
interval: 5m
values:
forecastle:
config:
title: "Xpander Homelab"
namespaceSelector:
matchNames:
- data
- development
- home
- media
- network
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: home.${SECRET_CLUSTER_DOMAIN}
paths:
- /
tls:
- hosts:
- home.${SECRET_CLUSTER_DOMAIN}

View File

@@ -0,0 +1,4 @@
# Kustomize entrypoint for the forecastle app (HelmRelease only).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helm-release.yaml

View File

@@ -0,0 +1,57 @@
---
# Flux HelmRelease: FreshRSS feed aggregator.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: freshrss
  namespace: data
spec:
  interval: 5m
  chart:
    spec:
      # renovate: registryUrl=https://k8s-at-home.com/charts/
      chart: freshrss
      version: 4.2.0
      sourceRef:
        kind: HelmRepository
        name: k8s-at-home-charts
        namespace: flux-system
      interval: 5m
  values:
    controllerType: deployment
    # Recreate: avoids two pods sharing the RWO data volume during rollout.
    strategy:
      type: Recreate
    image:
      repository: freshrss/freshrss
      tag: 1.18.0
      pullPolicy: IfNotPresent
    env:
      TZ: Europe/Paris
      # Feed refresh runs at minutes 18 and 48 of every hour.
      CRON_MIN: "18,48"
      DOMAIN: "https://freshrss.${SECRET_CLUSTER_DOMAIN}/"
    service:
      port:
        port: 80
      annotations:
        prometheus.io/probe: "true"
        prometheus.io/protocol: http
    ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: "nginx"
      hosts:
        - host: freshrss.${SECRET_CLUSTER_DOMAIN}
          paths:
            - path: /
              pathType: Prefix
      tls:
        - hosts:
            - "freshrss.${SECRET_CLUSTER_DOMAIN}"
    persistence:
      config:
        enabled: true
        mountPath: /var/www/FreshRSS/data
        existingClaim: freshrss-config
    resources:
      requests:
        cpu: 50m
        memory: 256Mi

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the freshrss app (HelmRelease + PVC).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helm-release.yaml
  - volume.yaml

View File

@@ -0,0 +1,13 @@
---
# Persistent data directory for FreshRSS (backed-up Longhorn class).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: freshrss-config
  namespace: data
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 1Gi

View File

@@ -0,0 +1,4 @@
# Kustomize entrypoint for the jobs service account used by CronJobs.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - serviceaccount.yaml

View File

@@ -0,0 +1,18 @@
# ServiceAccount for maintenance CronJobs in the data namespace, bound to
# the built-in "edit" ClusterRole (namespace-scoped via RoleBinding).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jobs
  namespace: data
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: jobs-edit
  namespace: data
subjects:
  - kind: ServiceAccount
    name: jobs
roleRef:
  kind: ClusterRole
  name: edit
  apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,68 @@
---
# Flux HelmRelease: Joplin Server (note synchronisation backend), backed by
# the shared PostgreSQL instance.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: joplin-server
  namespace: data
spec:
  interval: 5m
  chart:
    spec:
      # renovate: registryUrl=https://k8s-at-home.com/charts/
      chart: joplin-server
      version: 2.2.0
      sourceRef:
        kind: HelmRepository
        name: k8s-at-home-charts
        namespace: flux-system
      interval: 5m
  values:
    image:
      repository: joplin/server
      tag: 1.7.2
      pullPolicy: IfNotPresent
    controllerType: deployment
    strategy:
      type: Recreate
    resources: {}
    service:
      port:
        port: 22300
      annotations:
        prometheus.io/probe: "true"
        prometheus.io/protocol: tcp
    env:
      APP_BASE_URL: https://joplin.${SECRET_CLUSTER_DOMAIN}
      # Quoted: container environment values must be strings; bare YAML
      # integers fail to render/apply as env vars.
      APP_PORT: "22300"
      DB_CLIENT: pg
      POSTGRES_HOST: postgresql
      POSTGRES_PORT: "5432"
      POSTGRES_DATABASE: joplin
      POSTGRES_USER: joplin
      POSTGRES_PASSWORD: ${SECRET_JOPLIN_DB_PASSWORD}
    nodeSelector: {}
    tolerations: []
    affinity: {}
    podAnnotations: {}
    ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: "nginx"
      hosts:
        - host: joplin.${SECRET_CLUSTER_DOMAIN}
          paths:
            - path: /
              pathType: Prefix
      tls:
        - hosts:
            - "joplin.${SECRET_CLUSTER_DOMAIN}"

View File

@@ -0,0 +1,4 @@
# Kustomize entrypoint for the joplin-server app (HelmRelease only).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helm-release.yaml

View File

@@ -0,0 +1,17 @@
# Aggregate kustomization for the "data" namespace: one entry per app
# directory plus the shared NFS volume definitions.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - bitwardenrs
  - bookstack
  - forecastle
  - freshrss
  - jobs
  - joplin-server
  - pgadmin
  - pgbackups
  - recipes
  - resilio-sync
  - searx
  - sharry
  - vikunja
  - volumes

View File

@@ -0,0 +1,47 @@
---
# Flux HelmRelease: pgAdmin 4 web UI for the PostgreSQL instance.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: pgadmin
  namespace: data
spec:
  interval: 5m
  chart:
    spec:
      # renovate: registryUrl=https://helm.runix.net
      chart: pgadmin4
      version: 1.5.6
      sourceRef:
        kind: HelmRepository
        name: runix-charts
        namespace: flux-system
      interval: 5m
  values:
    strategy:
      type: Recreate
    image:
      repository: dpage/pgadmin4
      tag: 5.1
      pullPolicy: IfNotPresent
    env:
      # Initial admin login credentials, injected via Flux substitution.
      email: ${SECRET_PGADMIN_EMAIL}
      password: ${SECRET_PGADMIN_PASSWORD}
    service:
      annotations:
        prometheus.io/probe: "true"
        prometheus.io/protocol: http
    ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: "nginx"
        # NOTE(review): probe annotation on the Ingress (other apps set it on
        # the Service only) — confirm the blackbox exporter watches ingresses.
        prometheus.io/probe: "true"
      hosts:
        - host: pgadmin.${SECRET_CLUSTER_DOMAIN}
          paths:
            - /
      tls:
        - hosts:
            - pgadmin.${SECRET_CLUSTER_DOMAIN}
    persistentVolume:
      enabled: true
      existingClaim: pgadmin-config

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the pgadmin app (HelmRelease + PVC).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helm-release.yaml
  - volume.yaml

View File

@@ -0,0 +1,13 @@
---
# Persistent settings storage for pgAdmin (backed-up Longhorn class).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pgadmin-config
  namespace: data
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 1Gi

View File

@@ -0,0 +1,63 @@
---
# Scheduled logical backups of all PostgreSQL databases using
# prodrigestivill/postgres-backup-local, written to the NFS backups share.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pgbackups
  namespace: data
  labels:
    app.kubernetes.io/instance: pgbackups
    app.kubernetes.io/name: pgbackups
spec:
  selector:
    matchLabels:
      app.kubernetes.io/instance: pgbackups
      app.kubernetes.io/name: pgbackups
  replicas: 1
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: pgbackups
        app.kubernetes.io/name: pgbackups
    spec:
      containers:
        - name: pgbackups
          image: prodrigestivill/postgres-backup-local:13
          env:
            - name: POSTGRES_HOST
              value: postgresql
            # Comma-separated list: one dump per database, daily.
            - name: POSTGRES_DB
              value: authelia,bitwarden,freshrss,gitea,hass,healthchecks,joplin,lychee,postgres,recipes,sharry,vikunja,wallabag
            - name: POSTGRES_USER
              value: postgres
            - name: POSTGRES_PASSWORD
              value: ${SECRET_POSTGRESQL_POSTGRES_PASSWORD}
            # -Z9: maximum gzip compression for the dumps.
            - name: POSTGRES_EXTRA_OPTS
              value: "-Z9 --schema=public --blobs"
            - name: SCHEDULE
              value: "@daily"
            # Retention: 7 daily, 4 weekly, 3 monthly dumps.
            - name: BACKUP_KEEP_DAYS
              value: "7"
            - name: BACKUP_KEEP_WEEKS
              value: "4"
            - name: BACKUP_KEEP_MONTHS
              value: "3"
            - name: HEALTHCHECK_PORT
              value: "8080"
          resources:
            requests:
              cpu: 150m
              memory: 256Mi
          ports:
            - containerPort: 8080
          volumeMounts:
            - name: nfs-backups-data
              mountPath: /backups
              subPath: postgresql
      volumes:
        - name: nfs-backups-data
          persistentVolumeClaim:
            claimName: nfs-backups-data
      dnsConfig:
        options:
          # ndots=1: resolve external names (e.g. the NFS server) without
          # first trying every cluster search domain.
          - name: ndots
            value: "1"

View File

@@ -0,0 +1,4 @@
# Kustomize entrypoint for the pgbackups app (raw Deployment).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - deployment.yaml

View File

@@ -0,0 +1,86 @@
---
# Flux HelmRelease: Tandoor Recipes (Django app) with an nginx sidecar
# serving media/static files, backed by the shared PostgreSQL instance.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: recipes
  namespace: data
spec:
  interval: 5m
  chart:
    spec:
      # renovate: registryUrl=https://k8s-at-home.com/charts/
      chart: recipes
      version: 4.2.0
      sourceRef:
        kind: HelmRepository
        name: k8s-at-home-charts
        namespace: flux-system
      interval: 5m
  values:
    controllerType: deployment
    strategy:
      type: Recreate
    image:
      repository: vabene1111/recipes
      tag: 0.15.1
      pullPolicy: IfNotPresent
    env:
      SECRET_KEY: ${SECRET_POSTGRESQL_RECIPES_SECRET_KEY}
      DEBUG: "0"
      ALLOWED_HOSTS: "*"
      DB_ENGINE: django.db.backends.postgresql
      POSTGRES_HOST: postgresql
      # Quoted: container environment values must be strings; a bare YAML
      # integer fails to render/apply as an env var.
      POSTGRES_PORT: "5432"
      POSTGRES_DB: recipes
      POSTGRES_USER: recipes
      POSTGRES_PASSWORD: ${SECRET_POSTGRESQL_RECIPES_DB_PASSWORD}
      GUNICORN_MEDIA: "0"
      TIMEZONE: Europe/Paris
      FRACTION_PREF_DEFAULT: "0"
      COMMENT_PREF_DEFAULT: "1"
      SHOPPING_MIN_AUTOSYNC_INTERVAL: "5"
    service:
      port:
        port: 80
      annotations:
        prometheus.io/probe: "true"
        prometheus.io/protocol: http
    ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: "nginx"
        nginx.ingress.kubernetes.io/client-body-buffer-size: "10m"
      hosts:
        - host: recipes.${SECRET_CLUSTER_DOMAIN}
          paths:
            - path: /
              pathType: Prefix
      tls:
        - hosts:
            - "recipes.${SECRET_CLUSTER_DOMAIN}"
    persistence:
      config:
        enabled: false
      media:
        enabled: true
        mountPath: /opt/recipes/mediafiles
        existingClaim: recipes-media
      static:
        enabled: true
        mountPath: /opt/recipes/staticfiles
        existingClaim: recipes-static
    # nginx sidecar serves /media and /static from the shared volumes,
    # configured via the recipes-config ConfigMap.
    additionalContainers:
      - name: nginx
        image: nginx:1.19.10
        ports:
          - containerPort: 80
            name: http
        volumeMounts:
          - name: media
            mountPath: "/media"
          - name: static
            mountPath: "/static"
          - name: recipes-config
            mountPath: /etc/nginx/nginx.conf
            subPath: nginx-config
            readOnly: true

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for the recipes app (HelmRelease + PVCs).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helm-release.yaml
  - volumes.yaml

View File

@@ -0,0 +1,26 @@
---
# Media uploads for Tandoor Recipes (backed-up Longhorn class).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: recipes-media
  namespace: data
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 5Gi
---
# Collected Django static files, shared with the nginx sidecar.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: recipes-static
  namespace: data
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 1Gi

View File

@@ -0,0 +1,5 @@
# Kustomize entrypoint for resilio-sync (raw StatefulSet + PVCs).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - statefulset.yaml
  - volumes.yaml

View File

@@ -0,0 +1,241 @@
---
# Resilio Sync: one StatefulSet running two independent sync containers
# (per-user "claude" and "helene"), each with its own config volume,
# sync.conf ConfigMap and port pair, sharing the NFS data volumes.
# NOTE: removed a stray empty "strategy:" key that sat in the spec —
# "strategy" is a Deployment field; StatefulSets use "updateStrategy",
# which is already set below.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: resilio-sync
  namespace: data
  labels:
    app.kubernetes.io/instance: resilio-sync
    app.kubernetes.io/name: resilio-sync
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: resilio-sync
      app.kubernetes.io/name: resilio-sync
  updateStrategy:
    type: RollingUpdate
  serviceName: resilio-sync
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: resilio-sync
        app.kubernetes.io/name: resilio-sync
    spec:
      containers:
        - image: linuxserver/resilio-sync:amd64-2.7.2.1375-ls86
          name: resilio-sync-claude
          env:
            - name: TZ
              value: "Europe/Paris"
            # linuxserver.io images: run as this uid/gid for file ownership.
            - name: PUID
              value: "1026"
            - name: PGID
              value: "1000"
          ports:
            - containerPort: 8888
              name: http-claude
            - containerPort: 55555
              name: com-claude
          volumeMounts:
            - name: config-claude
              mountPath: /config
            - name: sync-conf-claude
              mountPath: /config/sync.conf
              subPath: sync.conf
            - name: home-claude-data
              mountPath: /sync/home/claude
            - name: nfs-photo-data
              mountPath: /sync/photo
            - name: nfs-backups-data
              mountPath: /sync/backup
            - name: nfs-music-data
              mountPath: /sync/music
            - name: nfs-video-data
              mountPath: /sync/video
              subPath: video
            - name: nfs-shared-documents-data
              mountPath: /sync/shared-documents
        - image: linuxserver/resilio-sync:amd64-2.7.2.1375-ls86
          name: resilio-sync-helene
          env:
            - name: TZ
              value: "Europe/Paris"
            - name: PUID
              value: "1027"
            - name: PGID
              value: "1000"
          ports:
            - containerPort: 8889
              name: http-helene
            - containerPort: 55556
              name: com-helene
          volumeMounts:
            - name: config-helene
              mountPath: /config
            - name: sync-conf-helene
              mountPath: /config/sync.conf
              subPath: sync.conf
            # NOTE(review): mounted at /sync/home (claude's is
            # /sync/home/claude) — confirm the asymmetry is intentional.
            - name: home-helene-data
              mountPath: /sync/home
            - name: nfs-backups-data
              mountPath: /sync/backup
      volumes:
        - name: sync-conf-claude
          configMap:
            name: resilio-sync-claude-conf
        - name: sync-conf-helene
          configMap:
            name: resilio-sync-helene-conf
        - name: config-claude
          persistentVolumeClaim:
            claimName: resilio-sync-config-claude
        - name: config-helene
          persistentVolumeClaim:
            claimName: resilio-sync-config-helene
        - name: home-claude-data
          persistentVolumeClaim:
            claimName: nfs-home-claude-data
        - name: home-helene-data
          persistentVolumeClaim:
            claimName: nfs-home-helene-data
        - name: nfs-backups-data
          persistentVolumeClaim:
            claimName: nfs-backups-data
        - name: nfs-photo-data
          persistentVolumeClaim:
            claimName: nfs-photo-data
        - name: nfs-music-data
          persistentVolumeClaim:
            claimName: nfs-music-data
        - name: nfs-video-data
          persistentVolumeClaim:
            claimName: nfs-video-data
        - name: nfs-shared-documents-data
          persistentVolumeClaim:
            claimName: nfs-shared-documents-data
      dnsConfig:
        options:
          - name: ndots
            value: "1"
---
# sync.conf for the "claude" container: web UI on 8888, peer traffic on 55555.
apiVersion: v1
kind: ConfigMap
metadata:
  name: resilio-sync-claude-conf
  namespace: data
data:
  sync.conf: |
    {
      "listening_port" : 55555,
      "storage_path" : "/config",
      "vendor" : "docker",
      "display_new_version": false,
      "directory_root_policy" : "belowroot",
      "directory_root" : "/sync/",
      "webui" :
      {
        "listen" : "0.0.0.0:8888",
        "allow_empty_password" : false,
        "dir_whitelist" : [ "/sync", "/sync/folders", "/sync/mounted_folders" ]
      }
    }
---
apiVersion: v1
kind: ConfigMap
metadata:
name: resilio-sync-helene-conf
namespace: data
data:
sync.conf: |
{
"listening_port" : 55556,
"storage_path" : "/config",
"vendor" : "docker",
"display_new_version": false,
"directory_root_policy" : "belowroot",
"directory_root" : "/sync/",
"webui" :
{
"listen" : "0.0.0.0:8889",
"allow_empty_password" : false,
"dir_whitelist" : [ "/sync", "/sync/folders", "/sync/mounted_folders" ]
}
}
---
# LoadBalancer service exposing both users' web UIs and peer-traffic ports
# on a dedicated external IP (Flux-substituted).
apiVersion: v1
kind: Service
metadata:
  annotations:
    prometheus.io/probe: "true"
    prometheus.io/protocol: tcp
  labels:
    app.kubernetes.io/instance: resilio-sync
    app.kubernetes.io/name: resilio-sync
  name: resilio-sync
  namespace: data
spec:
  ports:
    - name: http-claude
      port: 8888
      protocol: TCP
      targetPort: 8888
    - name: http-helene
      port: 8889
      protocol: TCP
      targetPort: 8889
    - name: com-claude
      port: 55555
      protocol: TCP
      targetPort: 55555
    - name: com-helene
      port: 55556
      protocol: TCP
      targetPort: 55556
  selector:
    app.kubernetes.io/instance: resilio-sync
    app.kubernetes.io/name: resilio-sync
  type: LoadBalancer
  externalIPs:
    - ${CLUSTER_LB_RESILIOSYNC}
  # Local: preserve client source IPs for the sync protocol.
  externalTrafficPolicy: Local
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: "nginx"
labels:
app.kubernetes.io/instance: resilio-sync
app.kubernetes.io/name: resilio-sync
name: resilio-sync
namespace: data
spec:
rules:
- host: resilio-sync-claude.${SECRET_CLUSTER_DOMAIN}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: resilio-sync
port:
number: 8888
- host: resilio-sync-helene.${SECRET_CLUSTER_DOMAIN}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: resilio-sync
port:
number: 8889
tls:
- hosts:
- resilio-sync-claude.${SECRET_CLUSTER_DOMAIN}
- resilio-sync-helene.${SECRET_CLUSTER_DOMAIN}

View File

@@ -0,0 +1,26 @@
---
# Per-user Resilio Sync state/config volumes (backed-up Longhorn class).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: resilio-sync-config-claude
  namespace: data
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: resilio-sync-config-helene
  namespace: data
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 1Gi

View File

@@ -0,0 +1,47 @@
---
# Flux HelmRelease: Searx metasearch engine, gated behind Authelia SSO.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: searx
  namespace: data
spec:
  interval: 5m
  chart:
    spec:
      # renovate: registryUrl=https://k8s-at-home.com/charts/
      chart: searx
      version: 3.2.0
      sourceRef:
        kind: HelmRepository
        name: k8s-at-home-charts
        namespace: flux-system
      interval: 5m
  values:
    controllerType: deployment
    strategy:
      type: Recreate
    image:
      repository: searx/searx
      tag: 1.0.0
      pullPolicy: IfNotPresent
    service:
      annotations:
        prometheus.io/probe: "true"
        prometheus.io/protocol: http
    searx:
      baseUrl: https://searx.${SECRET_CLUSTER_DOMAIN}
      mortyKey: ${SECRET_SEARX_MORTY_KEY}
    ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: "nginx"
        # Forward-auth via Authelia: unauthenticated users are redirected
        # to the login portal.
        nginx.ingress.kubernetes.io/auth-url: "http://authelia.networking.svc.cluster.local/api/verify"
        nginx.ingress.kubernetes.io/auth-signin: "https://login.${SECRET_CLUSTER_DOMAIN}/"
      hosts:
        - host: searx.${SECRET_CLUSTER_DOMAIN}
          paths:
            - path: /
              pathType: Prefix
      tls:
        - hosts:
            - "searx.${SECRET_CLUSTER_DOMAIN}"

View File

@@ -0,0 +1,4 @@
# Kustomize entrypoint for the searx app (HelmRelease only).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helm-release.yaml

View File

@@ -0,0 +1,84 @@
---
# Flux HelmRelease: Sharry file-sharing service, configured via a mounted
# sharry.conf ConfigMap and backed by PostgreSQL (JDBC).
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: sharry
  namespace: data
spec:
  interval: 5m
  chart:
    spec:
      # renovate: registryUrl=https://k8s-at-home.com/charts/
      chart: sharry
      version: 2.2.0
      sourceRef:
        kind: HelmRepository
        name: k8s-at-home-charts
        namespace: flux-system
      interval: 5m
  values:
    image:
      repository: eikek0/sharry
      tag: 1.6.0
      pullPolicy: IfNotPresent
    controllerType: deployment
    strategy:
      type: Recreate
    resources: {}
    service:
      port:
        port: 9090
      annotations:
        prometheus.io/probe: "true"
        prometheus.io/protocol: http
    args:
      - "/opt/sharry.conf"
    sharry:
      baseUrl: "https://sharry.${SECRET_CLUSTER_DOMAIN}"
      jdbcUrl: ${SECRET_SHARRY_JDBC_URL}
      jdbcUser: sharry
      jdbcPassword: ${SECRET_SHARRY_DB_PASSWORD}
      chunkSize: "512K"
      maxSize: "1.5G"
      maxValidity: "31 days"
      signup: "closed"
    nodeSelector: {}
    tolerations: []
    affinity: {}
    podAnnotations: {}
    additionalVolumes:
      - name: sharry-config
        configMap:
          name: sharry-config
    additionalVolumeMounts:
      - name: sharry-config
        mountPath: /opt/sharry.conf
        subPath: sharry.conf
    ingress:
      enabled: true
      annotations:
        # Added for consistency: every other ingress in this namespace pins
        # the nginx ingress class explicitly.
        kubernetes.io/ingress.class: "nginx"
        # -- Nginx client Body Buffer Size
        nginx.ingress.kubernetes.io/client-body-buffer-size: "2048m"
        # -- Nginx Proxy Body Size
        nginx.ingress.kubernetes.io/proxy-body-size: "2048m"
        nginx.ingress.kubernetes.io/proxy-buffering: "off"
      hosts:
        - host: sharry.${SECRET_CLUSTER_DOMAIN}
          paths:
            - path: /
              pathType: Prefix
      tls:
        - hosts:
            - "sharry.${SECRET_CLUSTER_DOMAIN}"

View File

@@ -0,0 +1,4 @@
# Kustomize entrypoint for the sharry app (HelmRelease only).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helm-release.yaml

View File

@@ -0,0 +1,90 @@
---
# Flux HelmRelease: Vikunja task manager. The chart's main container runs
# Caddy as reverse proxy; the API and frontend run as additional containers.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: vikunja
  namespace: data
spec:
  interval: 5m
  chart:
    spec:
      # renovate: registryUrl=https://k8s-at-home.com/charts/
      chart: vikunja
      version: 2.2.0
      sourceRef:
        kind: HelmRepository
        name: k8s-at-home-charts
        namespace: flux-system
      interval: 5m
  values:
    image:
      repository: caddy
      tag: 2.3.0-alpine
      pullPolicy: IfNotPresent
    controllerType: deployment
    strategy:
      type: Recreate
    resources: {}
    service:
      port:
        port: 8080
      annotations:
        prometheus.io/probe: "true"
        prometheus.io/protocol: http
    nodeSelector: {}
    tolerations: []
    affinity: {}
    podAnnotations: {}
    additionalContainers:
      - name: api
        # NOTE(review): ":latest" + Always makes deployments non-reproducible
        # (and Renovate can't pin it) — consider pinning a version tag.
        image: vikunja/api:latest
        imagePullPolicy: Always
        env:
          - name: VIKUNJA_DATABASE_TYPE
            value: "postgres"
          - name: VIKUNJA_SERVICE_JWTSECRET
            value: ${SECRET_VIKUNJA_JWT_SECRET}
          - name: VIKUNJA_DATABASE_HOST
            value: postgresql
          - name: VIKUNJA_DATABASE_DATABASE
            value: vikunja
          - name: VIKUNJA_DATABASE_USER
            value: vikunja
          - name: VIKUNJA_DATABASE_PASSWORD
            value: ${SECRET_VIKUNJA_PASSWORD}
        volumeMounts:
          - name: files
            mountPath: /app/vikunja/files
      - name: frontend
        image: vikunja/frontend:latest
        imagePullPolicy: Always
    additionalVolumes:
      - name: vikunja-config
        configMap:
          name: vikunja-config
      - name: files
        persistentVolumeClaim:
          claimName: vikunja-files
    ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: "nginx"
      hosts:
        - host: vikunja.${SECRET_CLUSTER_DOMAIN}
          paths:
            - path: /
              pathType: Prefix
      tls:
        - hosts:
            - "vikunja.${SECRET_CLUSTER_DOMAIN}"

View File

@@ -0,0 +1,23 @@
---
# Weekly CronJob that restarts Vikunja by running a kubectl script from the
# repo, using the namespace-scoped "jobs" ServiceAccount.
# NOTE(review): batch/v1beta1 was current for this cluster's Kubernetes
# version; it is removed in k8s >= 1.25 (migrate to batch/v1 then).
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: vikunja-restart
  namespace: data
spec:
  schedule: "@weekly"
  jobTemplate:
    spec:
      template:
        metadata:
          name: vikunja-restart
        spec:
          serviceAccountName: jobs
          containers:
            - name: tester
              image: bitnami/kubectl:1.20.5
              command:
                # Fixed: was "bin/bash" (relative path, only resolvable when
                # the working directory happens to be "/").
                - "/bin/bash"
                - "-c"
                - "bash <(curl -s https://raw.githubusercontent.com/auricom/home-cluster/main/scripts/jobs-vikunja-restart.bash)"
          restartPolicy: Never

View File

@@ -0,0 +1,6 @@
# Kustomize entrypoint for the vikunja app (HelmRelease + CronJob + PVC).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helm-release.yaml
  - job.yaml
  - volume.yaml

View File

@@ -0,0 +1,13 @@
---
# File attachments storage for the Vikunja API (backed-up Longhorn class).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: vikunja-files
  namespace: data
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-backups
  resources:
    requests:
      storage: 1Gi

View File

@@ -0,0 +1,10 @@
# Shared NFS volume definitions (StorageClass + PV + PVC per share).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - nfs-backups-data.yaml
  - nfs-home-claude-data.yaml
  - nfs-home-helene-data.yaml
  - nfs-music-data.yaml
  - nfs-photo-data.yaml
  - nfs-video-data.yaml
  - nfs-shared-documents-data.yaml

View File

@@ -0,0 +1,37 @@
---
# Static NFS binding for the TrueNAS "backups" share: dummy StorageClass +
# pre-provisioned PV + PVC (1Mi is nominal — NFS ignores the size).
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs-backups-data
  namespace: data
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-backups-data
  namespace: data
spec:
  storageClassName: nfs-backups-data
  capacity:
    storage: 1Mi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: truenas
    path: "/mnt/storage/backups"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-backups-data
  namespace: data
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs-backups-data
  resources:
    requests:
      storage: 1Mi

View File

@@ -0,0 +1,37 @@
---
# Static NFS binding for claude's TrueNAS home share (same pattern as
# nfs-backups-data).
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs-home-claude-data
  namespace: data
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-home-claude-data
  namespace: data
spec:
  storageClassName: nfs-home-claude-data
  capacity:
    storage: 1Mi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: truenas
    path: "/mnt/storage/home/claude"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-home-claude-data
  namespace: data
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs-home-claude-data
  resources:
    requests:
      storage: 1Mi

View File

@@ -0,0 +1,37 @@
---
# Static NFS binding for helene's TrueNAS home share (same pattern as
# nfs-backups-data).
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs-home-helene-data
  namespace: data
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-home-helene-data
  namespace: data
spec:
  storageClassName: nfs-home-helene-data
  capacity:
    storage: 1Mi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: truenas
    path: "/mnt/storage/home/helene"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-home-helene-data
  namespace: data
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs-home-helene-data
  resources:
    requests:
      storage: 1Mi

View File

@@ -0,0 +1,37 @@
---
# Static NFS binding for the TrueNAS "music" share (same pattern as
# nfs-backups-data).
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs-music-data
  namespace: data
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-music-data
  namespace: data
spec:
  storageClassName: nfs-music-data
  capacity:
    storage: 1Mi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: truenas
    path: "/mnt/storage/music"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-music-data
  namespace: data
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs-music-data
  resources:
    requests:
      storage: 1Mi

View File

@@ -0,0 +1,37 @@
---
# Static NFS binding for the TrueNAS "photo" share (same pattern as
# nfs-backups-data).
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs-photo-data
  namespace: data
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-photo-data
  namespace: data
spec:
  storageClassName: nfs-photo-data
  capacity:
    storage: 1Mi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: truenas
    path: "/mnt/storage/photo"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-photo-data
  namespace: data
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs-photo-data
  resources:
    requests:
      storage: 1Mi

View File

@@ -0,0 +1,37 @@
---
# Static NFS binding for the TrueNAS "shared-documents" share (same pattern
# as nfs-backups-data).
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs-shared-documents-data
  namespace: data
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-shared-documents-data
  namespace: data
spec:
  storageClassName: nfs-shared-documents-data
  capacity:
    storage: 1Mi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: truenas
    path: "/mnt/storage/shared-documents"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-shared-documents-data
  namespace: data
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs-shared-documents-data
  resources:
    requests:
      storage: 1Mi

View File

@@ -0,0 +1,37 @@
---
# Static NFS binding for the TrueNAS "video" share (same pattern as
# nfs-backups-data).
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs-video-data
  namespace: data
provisioner: nfs
reclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-video-data
  namespace: data
spec:
  storageClassName: nfs-video-data
  capacity:
    storage: 1Mi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: truenas
    path: "/mnt/storage/video"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-video-data
  namespace: data
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs-video-data
  resources:
    requests:
      storage: 1Mi