♻️ media-servers

This commit is contained in:
auricom
2022-09-15 07:46:38 +02:00
parent c574749270
commit bdae482e3b
71 changed files with 505 additions and 1282 deletions

View File

@@ -3,18 +3,17 @@ apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app flood
namespace: media
namespace: default
spec:
interval: 15m
chart:
spec:
chart: kah-common-chart
version: 1.2.2
chart: app-template
version: 0.1.1
sourceRef:
kind: HelmRepository
name: k8s-at-home-charts
name: bjw-s-charts
namespace: flux-system
interval: 15m
install:
createNamespace: true
remediation:
@@ -23,43 +22,27 @@ spec:
remediation:
retries: 5
values:
global:
nameOverride: *app
image:
repository: jesec/flood
tag: 4.7.0
env:
FLOOD_OPTION_RUNDIR: /data
FLOOD_OPTION_AUTH: "none"
FLOOD_OPTION_QBURL: "http://qbittorrent:8080"
FLOOD_OPTION_QBUSER: admin
FLOOD_OPTION_QBPASS: ${SECRET_QBITTORRENT_PASSWORD}
podSecurityContext:
runAsUser: 1001
runAsGroup: 1001
fsGroup: 1001
persistence:
data:
enabled: true
existingClaim: flood-config
mountPath: /data
FLOOD_OPTION_QBURL: "http://qbittorrent.default.svc.cluster.local."
envFrom:
- secretRef:
name: *app
service:
main:
ports:
http:
port: 3000
ingress:
main:
enabled: true
ingressClassName: "nginx"
annotations:
nginx.ingress.kubernetes.io/auth-url: "http://authelia.default.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_CLUSTER_DOMAIN}"
auth.home.arpa/enabled: "true"
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
@@ -68,7 +51,16 @@ spec:
tls:
- hosts:
- *host
podSecurityContext:
runAsUser: 1001
runAsGroup: 1001
fsGroup: 1001
fsGroupChangePolicy: "OnRootMismatch"
persistence:
data:
enabled: true
existingClaim: flood-config
mountPath: /data
resources:
requests:
memory: 250Mi

View File

@@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- secret.sops.yaml
- volume.yaml
- helm-release.yaml

View File

@@ -0,0 +1,30 @@
# yamllint disable
apiVersion: v1
kind: Secret
metadata:
name: flood
namespace: default
type: Opaque
stringData:
FLOOD_OPTION_QBUSER: ENC[AES256_GCM,data:wwb74Ok=,iv:bLa7BU9lqiUKUqO5hLaMKE50ovxUJzJnaEMu9QSX6wQ=,tag:VQjtK4T8AOQIvPEujTOfcA==,type:str]
FLOOD_OPTION_QBPASS: ENC[AES256_GCM,data:8PzsOc2NNHkY8kRVB3z/62W4peA=,iv:pbRQ+I9IBAY/+QYfVKuNGUr4zYAawUzqdbG8IeETIhQ=,tag:X8O0AitScHuBXcoePprZ1Q==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBoN0VJaHVYcXNDZDlZUGRn
YUViZDU0TCtmbzkycUpiZUVDbkluSzdSM2hVClpMRDdKREJBZEpEYUIxUGlIem9Q
Z08rVUVLUFhWNGdncElCR2hFVFNJUEUKLS0tIDZzcDVyb0lMTzRrNStBRU1KN2wy
OU81anNCMk13bXNXRVM3ZWcxTjd6SUkKd5FvLfeXe4p7j5eryl9ZuVh6oT920yiy
hsaI1Cwm2WH55lR++P1jtIyTo+lOL5M+IZUeyC7LXBpMp2UBNbllcw==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2022-09-15T05:40:26Z"
mac: ENC[AES256_GCM,data:hwIHegLoNt6vHq1Dj3sispmAoByMN25HAG/koTtaNSCs94W4JbGGqJ+6waXX9vlWyWux6gJw8Y4j71BnjfP5Fhk4sTkS2N30XrNt/B4+95jO4u4spfZ5MPzb4FE5qIVaqDliDbhj50GA2eruVtYgGgJ4oCADWGI+iJZYyKnuUNQ=,iv:w9lUfjBF194TQQjUGzPBOpbYeey6eOG8heU7QKYF2gk=,tag:xiTESQOcm/PGaIYZqLgFQQ==,type:str]
pgp: []
encrypted_regex: ^(data|stringData)$
version: 3.7.3

View File

@@ -3,8 +3,10 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: flood-config
namespace: media
namespace: default
labels:
app.kubernetes.io/name: &name flood
app.kubernetes.io/instance: *name
kasten-io/backup: "true"
spec:
accessModes:

View File

@@ -3,6 +3,8 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- flood
- pyload
- qbittorrent
- qbittorrent-jobs
- sabnzbd

View File

@@ -3,18 +3,17 @@ apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app pyload
namespace: media
namespace: default
spec:
interval: 15m
chart:
spec:
chart: kah-common-chart
version: 1.2.2
chart: app-template
version: 0.1.1
sourceRef:
kind: HelmRepository
name: k8s-at-home-charts
name: bjw-s-charts
namespace: flux-system
interval: 15m
install:
createNamespace: true
remediation:
@@ -23,16 +22,30 @@ spec:
remediation:
retries: 5
values:
global:
nameOverride: *app
image:
repository: linuxserver/pyload-ng
tag: develop-version-f2633b80
env:
TZ: "${TIMEZONE}"
service:
main:
ports:
http:
port: 8000
ingress:
main:
enabled: true
ingressClassName: "nginx"
annotations:
auth.home.arpa/enabled: "true"
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
persistence:
config:
enabled: true
@@ -44,30 +57,6 @@ spec:
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/downloads
mountPath: /mnt/storage/downloads
service:
main:
ports:
http:
port: 8000
ingress:
main:
enabled: true
ingressClassName: "nginx"
annotations:
nginx.ingress.kubernetes.io/auth-url: "http://authelia.default.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_CLUSTER_DOMAIN}"
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
resources:
requests:
cpu: 100m

View File

@@ -2,5 +2,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml
- helm-release.yaml

View File

@@ -3,8 +3,10 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pyload-config
namespace: media
namespace: default
labels:
app.kubernetes.io/name: &name pyload
app.kubernetes.io/instance: *name
kasten-io/backup: "true"
spec:
accessModes:

View File

@@ -11,8 +11,9 @@ resources:
- home-automation
- kube-tools
#- logs
- media
- media-automation
- media-servers
- monitoring
- networking
- storage
- web-tools

View File

@@ -2,10 +2,10 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: calibre-web
namespace: media
name: &app calibre-web
namespace: default
spec:
interval: 5m
interval: 15m
chart:
spec:
chart: calibre-web
@@ -14,18 +14,22 @@ spec:
kind: HelmRepository
name: k8s-at-home-charts
namespace: flux-system
interval: 5m
install:
createNamespace: true
remediation:
retries: 5
upgrade:
remediation:
retries: 5
values:
image:
repository: ghcr.io/linuxserver/calibre-web
tag: amd64-version-0.6.18
env:
TZ: "${TIMEZONE}"
PUID: "1026"
PGID: "1000"
DOCKER_MODS: "amd64-version-0.6.18"
persistence:
config:
enabled: true
@@ -36,7 +40,6 @@ spec:
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/home/claude/books
mountPath: /mnt/storage/home/claude/books
ingress:
main:
enabled: true
@@ -50,7 +53,6 @@ spec:
tls:
- hosts:
- *host
resources:
requests:
cpu: 15m

View File

@@ -1,5 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml
- helm-release.yaml

View File

@@ -2,14 +2,16 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: bazarr-config
namespace: media
name: calibre-web-config
namespace: default
labels:
kasten-io/backup: "true"
app.kubernetes.io/name: &name calibre-web
app.kubernetes.io/instance: *name
snapshot.home.arpa/enabled: "true"
spec:
accessModes:
- ReadWriteOnce
storageClassName: rook-ceph-block
resources:
requests:
storage: 1Gi
storageClassName: rook-ceph-block

View File

@@ -2,10 +2,10 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: calibre
namespace: media
name: &app calibre
namespace: default
spec:
interval: 5m
interval: 15m
chart:
spec:
chart: calibre
@@ -14,17 +14,21 @@ spec:
kind: HelmRepository
name: k8s-at-home-charts
namespace: flux-system
interval: 5m
install:
createNamespace: true
remediation:
retries: 5
upgrade:
remediation:
retries: 5
values:
image:
repository: ghcr.io/linuxserver/calibre
tag: version-v6.4.0
env:
TZ: "${TIMEZONE}"
PUID: "1026"
PGID: "1000"
persistence:
config:
enabled: true
@@ -35,18 +39,15 @@ spec:
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/home/claude/books
mountPath: /mnt/storage/home/claude/books
service:
webserver:
enabled: true
ingress:
main:
enabled: true
ingressClassName: "nginx"
annotations:
nginx.ingress.kubernetes.io/auth-url: "http://authelia.default.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_CLUSTER_DOMAIN}"
auth.home.arpa/enabled: "true"
hosts:
- host: &host "calibre.${SECRET_CLUSTER_DOMAIN}"
paths:
@@ -55,7 +56,6 @@ spec:
tls:
- hosts:
- *host
resources:
requests:
cpu: 15m

View File

@@ -1,5 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml
- helm-release.yaml

View File

@@ -3,13 +3,15 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: calibre-config
namespace: media
namespace: default
labels:
kasten-io/backup: "true"
app.kubernetes.io/name: &name calibre
app.kubernetes.io/instance: *name
snapshot.home.arpa/enabled: "true"
spec:
accessModes:
- ReadWriteOnce
storageClassName: rook-ceph-block
resources:
requests:
storage: 1Gi
storageClassName: rook-ceph-block

View File

@@ -3,18 +3,17 @@ apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app jellyfin
namespace: media
namespace: default
spec:
interval: 15m
chart:
spec:
chart: kah-common-chart
version: 1.2.2
chart: app-template
version: 0.1.1
sourceRef:
kind: HelmRepository
name: k8s-at-home-charts
name: bjw-s-charts
namespace: flux-system
interval: 15m
install:
createNamespace: true
remediation:
@@ -22,14 +21,46 @@ spec:
upgrade:
remediation:
retries: 5
dependsOn:
- name: intel-gpu-plugin
namespace: default
- name: node-feature-discovery
namespace: default
values:
global:
nameOverride: *app
image:
repository: ghcr.io/k8s-at-home/jellyfin
tag: v10.8.4
repository: ghcr.io/onedr0p/jellyfin
tag: 10.8.4@sha256:63b837c627d740c7965aeba0ec5c83146926c9a60b25890ae75b0c67737d2ac4
env:
TZ: "${TIMEZONE}"
service:
main:
type: LoadBalancer
externalIPs: ["${CLUSTER_LB_JELLYFIN}"]
externalTrafficPolicy: Local
ports:
http:
port: 8096
ingress:
main:
enabled: true
ingressClassName: "nginx"
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
podSecurityContext:
runAsUser: 568
runAsGroup: 568
fsGroup: 568
fsGroupChangePolicy: "OnRootMismatch"
supplementalGroups:
- 44
- 109
- 100
persistence:
config:
enabled: true
@@ -53,36 +84,9 @@ spec:
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/video
mountPath: /mnt/storage/video
service:
main:
ports:
http:
port: 8096
ingress:
main:
transcode:
enabled: true
ingressClassName: "nginx"
annotations:
external-dns.alpha.kubernetes.io/target: "services.${SECRET_DOMAIN}."
external-dns/is-public: "true"
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
resources:
requests:
memory: 4Gi
cpu: 1
limits:
gpu.intel.com/i915: 1
type: emptyDir
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@@ -92,3 +96,11 @@ spec:
operator: In
values:
- "true"
resources:
requests:
gpu.intel.com/i915: 1
cpu: 1
memory: 4Gi
limits:
gpu.intel.com/i915: 1
memory: 6Gi

View File

@@ -1,5 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml
- helm-release.yaml

View File

@@ -3,13 +3,15 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: jellyfin-config
namespace: media
namespace: default
labels:
app.kubernetes.io/name: &name jellyfin
app.kubernetes.io/instance: *name
kasten-io/backup: "true"
spec:
accessModes:
- ReadWriteOnce
storageClassName: rook-ceph-block
resources:
requests:
storage: 30Gi
storageClassName: rook-ceph-block

View File

@@ -3,18 +3,17 @@ apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app komga
namespace: media
namespace: default
spec:
interval: 15m
chart:
spec:
chart: kah-common-chart
version: 1.2.2
chart: app-template
version: 0.1.1
sourceRef:
kind: HelmRepository
name: k8s-at-home-charts
name: bjw-s-charts
namespace: flux-system
interval: 15m
install:
createNamespace: true
remediation:
@@ -23,43 +22,24 @@ spec:
remediation:
retries: 5
values:
global:
nameOverride: *app
image:
repository: gotson/komga
tag: "0.157.2"
env:
TZ: "${TIMEZONE}"
podSecurityContext:
runAsUser: 568
runAsGroup: 568
fsGroup: 568
persistence:
config:
enabled: true
existingClaim: komga-config
mountPath: /config
comics:
enabled: true
type: nfs
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/home/claude/comics
mountPath: /mnt/storage/home/claude/comics
SERVER_PORT: &port 80
service:
main:
ports:
http:
port: 8080
port: *port
ingress:
main:
enabled: true
ingressClassName: "nginx"
annotations:
auth.home.arpa/enabled: "true"
external-dns.home.arpa/enabled: "true"
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
@@ -68,10 +48,26 @@ spec:
tls:
- hosts:
- *host
podSecurityContext:
runAsUser: 568
runAsGroup: 568
fsGroup: 568
fsGroupChangePolicy: "OnRootMismatch"
supplementalGroups:
- 100
persistence:
config:
enabled: true
existingClaim: komga-config
comics:
enabled: true
type: nfs
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/home/claude/comics
mountPath: /mnt/storage/home/claude/comics
resources:
requests:
memory: 500Mi
cpu: 500m
limits:
memory: 4000Mi
memory: 4Gi

View File

@@ -1,5 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml
- helm-release.yaml

View File

@@ -3,13 +3,15 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: komga-config
namespace: media
namespace: default
labels:
kasten-io/backup: "true"
app.kubernetes.io/name: &name komga
app.kubernetes.io/instance: *name
snapshot.home.arpa/enabled: "true"
spec:
accessModes:
- ReadWriteOnce
storageClassName: rook-ceph-block
resources:
requests:
storage: 20Gi
storageClassName: rook-ceph-block

View File

@@ -1,16 +1,12 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- namespace.yaml
- calibre
- calibre-web
- flood
- jellyfin
- jobs
- komga
- lychee
- music_transcode
- media-browser
- navidrome
- pyload
- theme-park
# - travelstories

View File

@@ -3,18 +3,17 @@ apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app lychee
namespace: media
namespace: default
spec:
interval: 15m
chart:
spec:
chart: kah-common-chart
version: 1.2.2
chart: app-template
version: 0.1.1
sourceRef:
kind: HelmRepository
name: k8s-at-home-charts
name: bjw-s-charts
namespace: flux-system
interval: 15m
install:
createNamespace: true
remediation:
@@ -22,23 +21,47 @@ spec:
upgrade:
remediation:
retries: 5
dependsOn:
- name: postgres
namespace: default
- name: redis
namespace: default
values:
global:
nameOverride: *app
image:
repository: lycheeorg/lychee-laravel
tag: v4.6.0
env:
PHP_TZ: ${TIMEZONE}
TIMEZONE: ${TIMEZONE}
APP_NAME: Lychee
DB_CONNECTION: pgsql
DB_HOST: postgres.${SECRET_DOMAIN}
DB_HOST: postgres-rw.default.svc.cluster.local
DB_PORT: 5432
DB_DATABASE: lychee
DB_USERNAME: lychee
DB_PASSWORD: ${SECRET_LYCHEE_DB_PASSWORD}
REDIS_HOST: redis.default.svc.cluster.local
REDIS_PORT: 6379
envFrom:
- secretRef:
name: *app
service:
main:
ports:
http:
port: 80
ingress:
main:
enabled: true
ingressClassName: "nginx"
annotations:
auth.home.arpa/enabled: "true"
external-dns.home.arpa/enabled: "true"
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
persistence:
photo:
enabled: true
@@ -51,29 +74,6 @@ spec:
enabled: true
mountPath: /uploads
existingClaim: lychee-files
service:
main:
ports:
http:
port: 80
ingress:
main:
enabled: true
ingressClassName: "nginx"
annotations:
external-dns.alpha.kubernetes.io/target: "services.${SECRET_DOMAIN}."
external-dns/is-public: "true"
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
resources:
requests:
cpu: 100m

View File

@@ -0,0 +1,9 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- volume.yaml
- secret.sops.yaml
- helm-release.yaml
patchesStrategicMerge:
- patches/postgres.yaml

View File

@@ -0,0 +1,31 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: lychee
namespace: default
spec:
values:
initContainers:
init-db:
image: ghcr.io/onedr0p/postgres-initdb:14.5
env:
- name: POSTGRES_HOST
value: postgres-rw.default.svc.cluster.local
- name: POSTGRES_DB
value: lychee
- name: POSTGRES_SUPER_PASS
valueFrom:
secretKeyRef:
name: postgres-superuser
key: password
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: lychee
key: DB_USERNAME
- name: POSTGRES_PASS
valueFrom:
secretKeyRef:
name: lychee
key: DB_PASSWORD

View File

@@ -0,0 +1,30 @@
# yamllint disable
apiVersion: v1
kind: Secret
metadata:
name: lychee
namespace: default
type: Opaque
stringData:
DB_USERNAME: ENC[AES256_GCM,data:AYtw694u,iv:WiWUnIxv44F3hP69AMe1iZCO6+E2zG19KtyhACFG9Xs=,tag:Nqozw/OhXSR4AqtsrV/c+Q==,type:str]
DB_PASSWORD: ENC[AES256_GCM,data:M5kb0xQ7owTY2EFs00U=,iv:zxULHd/EDwr1DbhPPXJ5hH3gb1NvKilkJKV+X5LL9wA=,tag:HYa8DVQT9M2P2ISFTeCdLA==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBJaU16anJNV2pBZmxPR3h2
bWREUnpjcTFvd05ZQ2E4VVBDdm1FL2k4WEYwCkdQSStTNWtpdjNkUW51WS9MekdC
VkpTUUFjSjY2a1JMOUtqOVh5M0JRR2sKLS0tIDRmcWpJSEVvaUp4U1lsaTZYZGNw
OGVKWU0zNUZJSFh4aFJxQWFsYm1VeFkKaDeI/hl7z0Qh8t5W39Kxu9ert1dt4xo+
LX+MjpVqxiZNcfwROD4bkWeQSN+VsxoGOOyj4L15BlggNnlg+L7Hww==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2022-09-15T05:55:48Z"
mac: ENC[AES256_GCM,data:UWUyj6I6lndX3usRwDhF1EvY5LZ+zRmk0M2MGMUduBTr7+vgNvsV3brugkMPBJcHNmxkyyDh+r+rC8vIE+1BH8P/eYxB1DP0DAQIcieuVto40dIKh0z426VkeF6tD+zHyCWeeUxjVWwxrls7jiTDBWuSk7PSD6VhqBJqMJ2IuiE=,iv:GUz7JE6HXmIApfSgOlRvm4wAlkMFci/tudFU/uLZ/Yc=,tag:qcAAlGszHcuHbZ0uXwYB/A==,type:str]
pgp: []
encrypted_regex: ^(data|stringData)$
version: 3.7.3

View File

@@ -2,14 +2,16 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: lychee-files
namespace: media
name: lychee-config
namespace: default
labels:
kasten-io/backup: "true"
app.kubernetes.io/name: &name lychee
app.kubernetes.io/instance: *name
snapshot.home.arpa/enabled: "true"
spec:
accessModes:
- ReadWriteOnce
storageClassName: rook-ceph-block
resources:
requests:
storage: 100Gi
storageClassName: rook-ceph-block

View File

@@ -0,0 +1,93 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app media-browser
namespace: default
spec:
interval: 15m
chart:
spec:
chart: app-template
version: 0.1.1
sourceRef:
kind: HelmRepository
name: bjw-s-charts
namespace: flux-system
install:
createNamespace: true
remediation:
retries: 5
upgrade:
remediation:
retries: 5
values:
image:
repository: docker.io/filebrowser/filebrowser
tag: v2.22.4
env:
TZ: "${TIMEZONE}"
FB_DATABASE: "/config/filebrowser.db"
FB_ROOT: "/media"
FB_LOG: "stdout"
FB_NOAUTH: "true"
service:
main:
ports:
http:
port: &port 80
probes:
liveness: &probes
enabled: true
custom: true
spec:
httpGet:
path: /health
port: *port
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 3
readiness: *probes
startup:
enabled: false
ingress:
main:
enabled: true
ingressClassName: "nginx"
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
persistence:
config:
enabled: true
existingClaim: media-browser-config
music:
enabled: true
type: nfs
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/music
mountPath: /mnt/storage/music
photo:
enabled: true
type: nfs
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/photo
mountPath: /mnt/storage/photo
video:
enabled: true
type: nfs
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/video
mountPath: /mnt/storage/video
resources:
requests:
cpu: 10m
memory: 50Mi
limits:
memory: 500Mi

View File

@@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- volume.yaml
- helm-release.yaml

View File

@@ -2,14 +2,16 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: calibre-web-config
namespace: media
name: media-browser-config
namespace: default
labels:
app.kubernetes.io/name: &name media-browser
app.kubernetes.io/instance: *name
kasten-io/backup: "true"
spec:
accessModes:
- ReadWriteOnce
storageClassName: rook-ceph-block
resources:
requests:
storage: 1Gi
storageClassName: rook-ceph-block

View File

@@ -3,18 +3,17 @@ apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app navidrome
namespace: media
namespace: default
spec:
interval: 15m
chart:
spec:
chart: kah-common-chart
version: 1.2.2
chart: app-template
version: 0.1.1
sourceRef:
kind: HelmRepository
name: k8s-at-home-charts
name: bjw-s-charts
namespace: flux-system
interval: 15m
install:
createNamespace: true
remediation:
@@ -23,47 +22,51 @@ spec:
remediation:
retries: 5
values:
global:
nameOverride: *app
image:
repository: docker.io/deluan/navidrome
tag: 0.47.5
repository: ghcr.io/onedr0p/navidrome
tag: 0.47.5@sha256:17ef739628ad46a05d111ac9324a8b8cb89a7e2bbb1c1277a01b08d395243b64
env:
TZ: "${TIMEZONE}"
ND_LOGLEVEL: "info"
ND_SCANSCHEDULE: "1h"
ND_SESSIONTIMEOUT: 24h
ND_DATAFOLDER: /config
ND_ENABLEGRAVATAR: "true"
ND_LOGLEVEL: info
ND_MUSICFOLDER: /mnt/storage/music/Artistes
ND_PORT: &port 80
ND_REVERSEPROXYUSERHEADER: "Remote-User"
ND_REVERSEPROXYWHITELIST: "${NET_POD_CIDR}"
ND_SCANSCHEDULE: "@every 1h"
ND_SESSIONTIMEOUT: 24h
service:
main:
ports:
http:
port: 4533
port: *port
ingress:
main:
enabled: true
ingressClassName: "nginx"
annotations:
external-dns.alpha.kubernetes.io/target: "services.${SECRET_DOMAIN}."
external-dns/is-public: "true"
auth.home.arpa/enabled: "true"
external-dns.home.arpa/enabled: "true"
hosts:
- host: &host-release "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
- path: /
pathType: Prefix
- host: &host-custom "music.${SECRET_CLUSTER_DOMAIN}"
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host-release
- *host-custom
- *host
podSecurityContext:
runAsUser: 568
runAsGroup: 568
fsGroup: 568
fsGroupChangePolicy: "OnRootMismatch"
supplementalGroups:
- 100
persistence:
config:
enabled: true
existingClaim: navidrome-config
mountPath: /data
music:
enabled: true
type: nfs

View File

@@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- volume.yaml
- helm-release.yaml

View File

@@ -3,13 +3,15 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: navidrome-config
namespace: media
namespace: default
labels:
kasten-io/backup: "true"
app.kubernetes.io/name: &name navidrome
app.kubernetes.io/instance: *name
snapshot.home.arpa/enabled: "true"
spec:
accessModes:
- ReadWriteOnce
storageClassName: rook-ceph-block
resources:
requests:
storage: 1Gi
storageClassName: rook-ceph-block

View File

@@ -1,83 +0,0 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app bazarr
namespace: media
spec:
interval: 15m
chart:
spec:
chart: kah-common-chart
version: 1.2.2
sourceRef:
kind: HelmRepository
name: k8s-at-home-charts
namespace: flux-system
interval: 15m
install:
createNamespace: true
remediation:
retries: 5
upgrade:
remediation:
retries: 5
values:
global:
nameOverride: *app
image:
repository: ghcr.io/k8s-at-home/bazarr
tag: v1.1.1
env:
TZ: "${TIMEZONE}"
podSecurityContext:
runAsUser: 568
runAsGroup: 568
fsGroup: 568
persistence:
config:
enabled: true
existingClaim: bazarr-config
video:
enabled: true
type: nfs
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/video
mountPath: /mnt/storage/video
service:
main:
ports:
http:
port: 6767
ingress:
main:
enabled: true
ingressClassName: "nginx"
annotations:
nginx.ingress.kubernetes.io/auth-url: "http://authelia.default.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_CLUSTER_DOMAIN}"
# nginx.ingress.kubernetes.io/configuration-snippet: |
# proxy_set_header Accept-Encoding "";
# sub_filter '</head>' '<link rel="stylesheet" type="text/css" href="https://theme-park.${SECRET_CLUSTER_DOMAIN}/css/base/bazarr/nord.css"></head>';
# sub_filter_once on;
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
memory: 1Gi

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -1,4 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- serviceaccount.yaml

View File

@@ -1,18 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: jobs
namespace: media
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: jobs-edit
namespace: media
subjects:
- kind: ServiceAccount
name: jobs
roleRef:
kind: ClusterRole
name: edit
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -1,91 +0,0 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app lidarr
namespace: media
spec:
interval: 15m
chart:
spec:
chart: kah-common-chart
version: 1.2.2
sourceRef:
kind: HelmRepository
name: k8s-at-home-charts
namespace: flux-system
interval: 15m
install:
createNamespace: true
remediation:
retries: 5
upgrade:
remediation:
retries: 5
values:
global:
nameOverride: *app
image:
repository: ghcr.io/k8s-at-home/lidarr-develop
tag: v1.1.0.2649
env:
TZ: "${TIMEZONE}"
podSecurityContext:
runAsUser: 568
runAsGroup: 568
fsGroup: 568
persistence:
config:
enabled: true
existingClaim: lidarr-config
mountPath: /config
music:
enabled: true
type: nfs
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/music
mountPath: /mnt/storage/music
downloads:
enabled: true
type: nfs
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/downloads
mountPath: /mnt/storage/downloads
service:
main:
ports:
http:
port: 8686
ingress:
main:
enabled: true
ingressClassName: "nginx"
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.default.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_CLUSTER_DOMAIN}"
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Accept-Encoding "";
sub_filter '</head>' '<link rel="stylesheet" type="text/css" href="https://theme-park.${SECRET_CLUSTER_DOMAIN}/css/base/lidarr/nord.css"></head>';
sub_filter_once on;
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
resources:
requests:
memory: 500Mi
cpu: 500m
limits:
memory: 1500Mi

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -1,15 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: lidarr-config
namespace: media
labels:
kasten-io/backup: "true"
spec:
accessModes:
- ReadWriteOnce
storageClassName: rook-ceph-block
resources:
requests:
storage: 20Gi

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -1,5 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: media

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -1,74 +0,0 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app prowlarr
namespace: media
spec:
interval: 15m
chart:
spec:
chart: kah-common-chart
version: 1.2.2
sourceRef:
kind: HelmRepository
name: k8s-at-home-charts
namespace: flux-system
interval: 15m
install:
createNamespace: true
remediation:
retries: 5
upgrade:
remediation:
retries: 5
values:
global:
nameOverride: *app
image:
repository: ghcr.io/k8s-at-home/prowlarr-nightly
tag: v0.4.6.1969
pullPolicy: IfNotPresent
env:
TZ: "${TIMEZONE}"
persistence:
config:
enabled: true
existingClaim: prowlarr-config
mountPath: /config
service:
main:
ports:
http:
port: 9696
ingress:
main:
enabled: true
ingressClassName: "nginx"
annotations:
nginx.ingress.kubernetes.io/auth-url: "http://authelia.default.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_CLUSTER_DOMAIN}"
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Accept-Encoding "";
sub_filter '</head>' '<link rel="stylesheet" type="text/css" href="https://theme-park.${SECRET_CLUSTER_DOMAIN}/css/base/prowlarr/nord.css"></head>';
sub_filter_once on;
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
resources:
requests:
memory: 100Mi
cpu: 100m
limits:
memory: 1000Mi

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -1,15 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: prowlarr-config
namespace: media
labels:
kasten-io/backup: "true"
spec:
accessModes:
- ReadWriteOnce
storageClassName: rook-ceph-block
resources:
requests:
storage: 1Gi

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -1,91 +0,0 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app radarr
namespace: media
spec:
interval: 15m
chart:
spec:
chart: kah-common-chart
version: 1.2.2
sourceRef:
kind: HelmRepository
name: k8s-at-home-charts
namespace: flux-system
interval: 15m
install:
createNamespace: true
remediation:
retries: 5
upgrade:
remediation:
retries: 5
values:
global:
nameOverride: *app
image:
repository: ghcr.io/k8s-at-home/radarr
tag: v4.1.0.6175
env:
TZ: "${TIMEZONE}"
podSecurityContext:
runAsUser: 568
runAsGroup: 568
fsGroup: 568
persistence:
config:
enabled: true
existingClaim: radarr-config
mountPath: /config
downloads:
enabled: true
type: nfs
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/downloads
mountPath: /mnt/storage/downloads
video:
enabled: true
type: nfs
server: "${LOCAL_LAN_TRUENAS}"
path: /mnt/storage/video
mountPath: /mnt/storage/video
service:
main:
ports:
http:
port: 7878
ingress:
main:
enabled: true
ingressClassName: "nginx"
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.default.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_CLUSTER_DOMAIN}"
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Accept-Encoding "";
sub_filter '</head>' '<link rel="stylesheet" type="text/css" href="https://theme-park.${SECRET_CLUSTER_DOMAIN}/css/base/radarr/nord.css"></head>';
sub_filter_once on;
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
resources:
requests:
memory: 500Mi
cpu: 500m
limits:
memory: 1500Mi

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- volume.yaml

View File

@@ -1,15 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-config
namespace: media
labels:
kasten-io/backup: "true"
spec:
accessModes:
- ReadWriteOnce
storageClassName: rook-ceph-block
resources:
requests:
storage: 20Gi

View File

@@ -1,85 +0,0 @@
---
# Readarr deployed via the k8s-at-home common chart; config on a PVC,
# media content over NFS from TrueNAS.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: &app readarr
  namespace: media
spec:
  interval: 15m
  chart:
    spec:
      chart: kah-common-chart
      version: 1.2.2
      sourceRef:
        kind: HelmRepository
        name: k8s-at-home-charts
        namespace: flux-system
      interval: 15m
  install:
    createNamespace: true
    remediation:
      retries: 5
  upgrade:
    remediation:
      retries: 5
  values:
    global:
      # Reuse the release name for generated resource names.
      nameOverride: *app
    image:
      repository: ghcr.io/k8s-at-home/readarr
      tag: v0.1.1.1352
    env:
      TZ: "${TIMEZONE}"
    # NOTE(review): unlike the radarr/sonarr releases, no podSecurityContext
    # (runAsUser/fsGroup 568) is set here — confirm whether that is intentional.
    persistence:
      config:
        enabled: true
        existingClaim: readarr-config
        mountPath: /config
      books:
        enabled: true
        type: nfs
        server: "${LOCAL_LAN_TRUENAS}"
        path: /mnt/storage/home/claude/books
        mountPath: /mnt/storage/home/claude/books
      downloads:
        enabled: true
        type: nfs
        server: "${LOCAL_LAN_TRUENAS}"
        path: /mnt/storage/downloads
        mountPath: /mnt/storage/downloads
    service:
      main:
        ports:
          http:
            port: 8787
    ingress:
      main:
        enabled: true
        ingressClassName: "nginx"
        annotations:
          # Authelia forward-auth in front of the UI.
          nginx.ingress.kubernetes.io/auth-url: "http://authelia.default.svc.cluster.local/api/verify"
          nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_CLUSTER_DOMAIN}"
          # Inject the theme-park Nord stylesheet into served pages.
          nginx.ingress.kubernetes.io/configuration-snippet: |
            proxy_set_header Accept-Encoding "";
            sub_filter '</head>' '<link rel="stylesheet" type="text/css" href="https://theme-park.${SECRET_CLUSTER_DOMAIN}/css/base/readarr/nord.css"></head>';
            sub_filter_once on;
        hosts:
          - host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
            paths:
              - path: /
                pathType: Prefix
        tls:
          - hosts:
              - *host
    resources:
      requests:
        memory: 100Mi
        cpu: 100m
      limits:
        memory: 750Mi

View File

@@ -1,15 +0,0 @@
---
# Rook-Ceph block volume backing Readarr's /config mount.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: readarr-config
  namespace: media
  labels:
    # Opts this volume into Kasten (kasten-io) backup policies.
    kasten-io/backup: "true"
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: rook-ceph-block
  resources:
    requests:
      storage: 1Gi

View File

@@ -1,82 +0,0 @@
---
# Nightly Recyclarr run that syncs quality settings into Sonarr and Radarr.
# An init container first renders the ConfigMap template (expanding the API
# keys via envsubst) into a shared emptyDir read by both sync containers.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: &app recyclarr
  namespace: media
spec:
  # Daily at 03:00.
  schedule: "0 3 * * *"
  # Never run two syncs at once.
  concurrencyPolicy: "Forbid"
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 5
  jobTemplate:
    spec:
      backoffLimit: 3
      # Garbage-collect finished Jobs after 5 minutes.
      ttlSecondsAfterFinished: 300
      template:
        spec:
          automountServiceAccountToken: false
          restartPolicy: OnFailure
          initContainers:
            # Renders /config/recyclarr.yaml -> /shared/recyclarr.yaml with
            # ${SONARR_API_KEY}/${RADARR_API_KEY} expanded from the env below.
            # NOTE(review): assumes bash and envsubst exist in the recyclarr
            # image — confirm.
            - name: render-configs
              image: ghcr.io/k8s-at-home/recyclarr:v2.5.0
              env:
                # Values substituted by Flux before the CronJob is applied.
                - name: RADARR_API_KEY
                  value: ${SECRET_RADARR_API_KEY}
                - name: SONARR_API_KEY
                  value: ${SECRET_SONARR_API_KEY}
              command:
                - "/bin/bash"
                - -c
              args:
                - "envsubst < /config/recyclarr.yaml > /shared/recyclarr.yaml"
              volumeMounts:
                - name: config
                  mountPath: /config
                - name: shared
                  mountPath: /shared
          containers:
            # One sync container per application; both read the rendered
            # config from the shared volume.
            - name: sonarr
              image: ghcr.io/k8s-at-home/recyclarr:v2.5.0
              imagePullPolicy: IfNotPresent
              env:
                - name: TZ
                  value: "${TIMEZONE}"
              command:
                - /app/recyclarr
              args:
                - sonarr
                - --app-data
                - /config
                - --config
                - /config/recyclarr.yaml
              volumeMounts:
                # Mount only the rendered file, read-only.
                - name: shared
                  mountPath: /config/recyclarr.yaml
                  subPath: recyclarr.yaml
                  readOnly: true
            - name: radarr
              image: ghcr.io/k8s-at-home/recyclarr:v2.5.0
              imagePullPolicy: IfNotPresent
              env:
                - name: TZ
                  value: "${TIMEZONE}"
              command:
                - /app/recyclarr
              args:
                - radarr
                - --app-data
                - /config
                - --config
                - /config/recyclarr.yaml
              volumeMounts:
                - name: shared
                  mountPath: /config/recyclarr.yaml
                  subPath: recyclarr.yaml
                  readOnly: true
          volumes:
            # Raw template from the generated ConfigMap (Flux substitution is
            # disabled for it, so the ${...} placeholders survive).
            - name: config
              configMap:
                name: *app
            # Scratch space holding the rendered config.
            - name: shared
              emptyDir: {}

View File

@@ -1,14 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - cronjob.yaml
namespace: media
configMapGenerator:
  # Ship recyclarr.yaml as a ConfigMap consumed by the CronJob.
  - name: recyclarr
    files:
      - recyclarr.yaml
generatorOptions:
  # Keep a stable ConfigMap name — the CronJob references it by name.
  disableNameSuffixHash: true
  annotations:
    # Keep ${...} placeholders intact for the CronJob's envsubst pass.
    kustomize.toolkit.fluxcd.io/substitute: disabled

View File

@@ -1,29 +0,0 @@
---
# Recyclarr starter configuration ("reasonable defaults").
# ${SONARR_API_KEY} / ${RADARR_API_KEY} are expanded at runtime by the
# CronJob's envsubst init container, not by Flux (substitution is disabled
# for this ConfigMap).
#
# Configuration reference (project formerly known as "Trash Updater"):
# https://github.com/recyclarr/recyclarr/wiki/Configuration-Reference
sonarr:
  - base_url: http://sonarr:8989
    api_key: ${SONARR_API_KEY}
    quality_definition: hybrid
    release_profiles:
      - trash_ids:
          - EBC725268D687D588A20CBC5F97E538B # Low Quality Groups
          - 1B018E0C53EC825085DD911102E2CA36 # Release Sources (Streaming Service)
          - 71899E6C303A07AF0E4746EFF9873532 # P2P Groups + Repack/Proper
          - d428eda85af1df8904b4bbe4fc2f537c # Anime - First release profile
          - 6cd9e10bb5bb4c63d2d7cd3279924c7b # Anime - Second release profile
radarr:
  - base_url: http://radarr:7878
    api_key: ${RADARR_API_KEY}
    delete_old_custom_formats: true
    quality_definition:
      type: movie
      preferred_ratio: 0.5

View File

@@ -1,91 +0,0 @@
---
# Sonarr deployed via the k8s-at-home common chart; config on a PVC,
# media content over NFS from TrueNAS.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: &app sonarr
  namespace: media
spec:
  interval: 15m
  chart:
    spec:
      chart: kah-common-chart
      version: 1.2.2
      sourceRef:
        kind: HelmRepository
        name: k8s-at-home-charts
        namespace: flux-system
      interval: 15m
  install:
    createNamespace: true
    remediation:
      retries: 5
  upgrade:
    remediation:
      retries: 5
  values:
    global:
      # Reuse the release name for generated resource names.
      nameOverride: *app
    image:
      repository: ghcr.io/k8s-at-home/sonarr
      tag: v3.0.9.1549
    env:
      TZ: "${TIMEZONE}"
    # Run unprivileged as UID/GID 568 (matches the radarr release).
    podSecurityContext:
      runAsUser: 568
      runAsGroup: 568
      fsGroup: 568
    persistence:
      config:
        enabled: true
        existingClaim: sonarr-config
        mountPath: /config
      downloads:
        enabled: true
        type: nfs
        server: "${LOCAL_LAN_TRUENAS}"
        path: /mnt/storage/downloads
        mountPath: /mnt/storage/downloads
      video:
        enabled: true
        type: nfs
        server: "${LOCAL_LAN_TRUENAS}"
        path: /mnt/storage/video
        mountPath: /mnt/storage/video
    service:
      main:
        ports:
          http:
            port: 8989
    ingress:
      main:
        enabled: true
        ingressClassName: "nginx"
        annotations:
          # "0" disables nginx's request-body size limit.
          nginx.ingress.kubernetes.io/proxy-body-size: "0"
          # Authelia forward-auth in front of the UI.
          nginx.ingress.kubernetes.io/auth-url: "http://authelia.default.svc.cluster.local/api/verify"
          nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_CLUSTER_DOMAIN}"
          # Inject the theme-park Nord stylesheet into served pages.
          nginx.ingress.kubernetes.io/configuration-snippet: |
            proxy_set_header Accept-Encoding "";
            sub_filter '</head>' '<link rel="stylesheet" type="text/css" href="https://theme-park.${SECRET_CLUSTER_DOMAIN}/css/base/sonarr/nord.css"></head>';
            sub_filter_once on;
        hosts:
          - host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
            paths:
              - path: /
                pathType: Prefix
        tls:
          - hosts:
              - *host
    resources:
      requests:
        memory: 500Mi
        cpu: 500m
      limits:
        memory: 1500Mi

View File

@@ -1,5 +0,0 @@
---
# Kustomization for the Sonarr app directory.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helm-release.yaml
  - volume.yaml

View File

@@ -1,15 +0,0 @@
---
# Rook-Ceph block volume backing Sonarr's /config mount.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: sonarr-config
  namespace: media
  labels:
    # Opts this volume into Kasten (kasten-io) backup policies.
    kasten-io/backup: "true"
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: rook-ceph-block
  resources:
    requests:
      storage: 20Gi

View File

@@ -1,86 +0,0 @@
---
# Tdarr server plus one in-pod transcode node; media over NFS from TrueNAS.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: tdarr
  namespace: media
spec:
  interval: 5m
  chart:
    spec:
      # renovate: registryUrl=https://k8s-at-home.com/charts/
      chart: tdarr
      version: 4.6.2
      sourceRef:
        kind: HelmRepository
        name: k8s-at-home-charts
        namespace: flux-system
      interval: 5m
  # Retry failed installs/upgrades, matching the other media HelmReleases.
  install:
    remediation:
      retries: 5
  upgrade:
    remediation:
      retries: 5
  values:
    image:
      repository: haveagitgat/tdarr
      # Quoted so the tag can never be re-typed by a YAML parser.
      tag: "2.00.18"
      pullPolicy: IfNotPresent
    env:
      # Quoted for consistency with the other releases; the substituted
      # value is always parsed as a string.
      TZ: "${TIMEZONE}"
    webUIPort: 8265
    # Server listens on all interfaces; 8266 is the node<->server channel.
    serverIP: 0.0.0.0
    serverPort: 8266
    node:
      enabled: true
      id: node
      image:
        repository: haveagitgat/tdarr_node
        tag: "2.00.18"
        pullPolicy: IfNotPresent
    persistence:
      data:
        enabled: true
        mountPath: /app/server
        existingClaim: tdarr-data
      music:
        enabled: true
        type: nfs
        server: "${LOCAL_LAN_TRUENAS}"
        path: /mnt/storage/music
        mountPath: /mnt/storage/music
      video:
        enabled: true
        type: nfs
        server: "${LOCAL_LAN_TRUENAS}"
        path: /mnt/storage/video
        mountPath: /mnt/storage/video
    service:
      main:
        ports:
          http:
            port: 8265
        annotations:
          prometheus.io/probe: "true"
          prometheus.io/protocol: http
      server:
        enabled: true
        protocol: TCP
        port: 8266
    ingress:
      main:
        enabled: true
        ingressClassName: "nginx"
        annotations:
          # Authelia forward-auth in front of the web UI.
          nginx.ingress.kubernetes.io/auth-url: "http://authelia.default.svc.cluster.local/api/verify"
          nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_CLUSTER_DOMAIN}"
          # traefik.ingress.kubernetes.io/router.entrypoints: "websecure"
          # traefik.ingress.kubernetes.io/router.middlewares: networking-forward-auth@kubernetescrd
        hosts:
          - host: "tdarr.${SECRET_CLUSTER_DOMAIN}"
            paths:
              - path: /
                pathType: Prefix
        tls:
          - hosts:
              - "tdarr.${SECRET_CLUSTER_DOMAIN}"

View File

@@ -1,6 +0,0 @@
---
# Kustomization for the Tdarr app directory.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helm-release.yaml
  - volume.yaml
  - service.yaml

View File

@@ -1,22 +0,0 @@
---
# Exposes the Tdarr server port (8266, node<->server traffic) on a dedicated
# LAN address so external transcode nodes can reach it.
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/name: tdarr
  name: tdarr-server
  namespace: media
spec:
  ports:
    - name: server
      port: 8266
      protocol: TCP
      targetPort: 8266
  selector:
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/name: tdarr
  type: LoadBalancer
  externalIPs:
    - ${CLUSTER_LB_TDARR}
  # Preserve client source IPs (traffic only served by nodes that host an
  # endpoint for this service).
  externalTrafficPolicy: Local

View File

@@ -1,15 +0,0 @@
---
# Rook-Ceph block volume backing Tdarr's /app/server data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: tdarr-data
  namespace: media
  labels:
    # Opts this volume into Kasten (kasten-io) backup policies.
    kasten-io/backup: "true"
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: rook-ceph-block
  resources:
    requests:
      storage: 5Gi

View File

@@ -1,100 +0,0 @@
---
# Static "travelstories" website served by Caddy. The Caddyfile is mounted
# from the travelstories-caddyfile ConfigMap; the site content is presumably
# baked into the image under /srv — confirm against the image build.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: travelstories
  namespace: media
  labels:
    app.kubernetes.io/instance: travelstories
    app.kubernetes.io/name: travelstories
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: travelstories
      app.kubernetes.io/name: travelstories
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: travelstories
        app.kubernetes.io/name: travelstories
    spec:
      # Pull credentials for the private in-cluster registry.
      imagePullSecrets:
        - name: regcred
      containers:
        - name: travelstories
          image: registry.${SECRET_CLUSTER_DOMAIN}/homelab/travelstories:1.0.1
          # NOTE(review): tag is pinned, so Always mainly guards against
          # retagged pushes — confirm this is intentional.
          imagePullPolicy: Always
          ports:
            - containerPort: 80
              name: http
          volumeMounts:
            # Mount only the Caddyfile, not the whole ConfigMap directory.
            - name: caddyfile
              mountPath: /etc/caddy/Caddyfile
              subPath: Caddyfile
      volumes:
        - name: caddyfile
          configMap:
            name: travelstories-caddyfile
---
# Caddyfile for the travelstories pod: plain-HTTP file server rooted at /srv.
apiVersion: v1
kind: ConfigMap
metadata:
  name: travelstories-caddyfile
  namespace: media
data:
  # NOTE(review): "|+" keeps trailing blank lines in the value; plain "|"
  # would be the more conventional choice here (harmless either way).
  Caddyfile: |+
    :80
    file_server
    root * /srv
---
# ClusterIP service in front of the travelstories pod.
apiVersion: v1
kind: Service
metadata:
  annotations:
    # Probe hints for monitoring (the consumer is not visible in this file).
    prometheus.io/probe: "true"
    prometheus.io/protocol: http
  labels:
    app.kubernetes.io/instance: travelstories
    app.kubernetes.io/name: travelstories
  name: travelstories
  namespace: media
spec:
  ports:
    - name: http
      port: 80
      protocol: TCP
      # Targets the container port named "http" in the Deployment.
      targetPort: http
  selector:
    app.kubernetes.io/instance: travelstories
    app.kubernetes.io/name: travelstories
  type: ClusterIP
---
# Ingress for the travelstories site, protected by Authelia forward-auth.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/auth-url: "http://authelia.default.svc.cluster.local/api/verify"
    nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_CLUSTER_DOMAIN}"
  labels:
    app.kubernetes.io/instance: travelstories
    app.kubernetes.io/name: travelstories
  name: travelstories
  namespace: media
spec:
  ingressClassName: "nginx"
  rules:
    - host: "travelstories.${SECRET_CLUSTER_DOMAIN}"
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: travelstories
                port:
                  number: 80
  tls:
    - hosts:
        # Fixed copy/paste error: the TLS host must match the rule host above
        # ("tdarr.…" was listed, so TLS never matched this site's hostname).
        - "travelstories.${SECRET_CLUSTER_DOMAIN}"

View File

@@ -1,4 +0,0 @@
---
# Kustomization for the travelstories app directory.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - deployment.yaml

View File

@@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# All listed app directories deploy into the default namespace.
namespace: default
resources:
  - music-transcode
  - theme-park

View File

@@ -3,7 +3,7 @@ apiVersion: batch/v1
kind: CronJob
metadata:
name: music-transcode
namespace: media
namespace: default
spec:
schedule: "0 2 * * *"
suspend: true

View File

@@ -3,18 +3,17 @@ apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app theme-park
namespace: media
namespace: default
spec:
interval: 15m
chart:
spec:
chart: kah-common-chart
version: 1.2.2
chart: app-template
version: 0.1.1
sourceRef:
kind: HelmRepository
name: k8s-at-home-charts
name: bjw-s-charts
namespace: flux-system
interval: 15m
install:
createNamespace: true
remediation:
@@ -23,11 +22,12 @@ spec:
remediation:
retries: 5
values:
global:
nameOverride: *app
controller:
replicas: 3
strategy: RollingUpdate
image:
repository: ghcr.io/k8s-at-home/theme-park
tag: v1.10.1
repository: ghcr.io/onedr0p/theme-park
tag: 1.10.1@sha256:0dcde933654316b6d0f4c9c8d4130f3a95e02927d753952f5199fe348627d7b5
service:
main:
ports:
@@ -37,6 +37,8 @@ spec:
main:
enabled: true
ingressClassName: "nginx"
annotations:
external-dns.home.arpa/enabled: "true"
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
paths:
@@ -45,9 +47,16 @@ spec:
tls:
- hosts:
- *host
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app.kubernetes.io/name: *app
resources:
requests:
cpu: 10m
memory: 50Mi
cpu: 5m
memory: 10Mi
limits:
memory: 150Mi
memory: 50Mi

View File

@@ -1,3 +1,4 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:

View File

@@ -15,7 +15,7 @@ data:
CLUSTER_LB_HASS: 192.168.169.107
CLUSTER_LB_SYSLOG: 192.168.169.108
CLUSTER_LB_EMQX: 192.168.169.109
CLUSTER_LB_TDARR: 192.168.169.110
CLUSTER_LB_JELLYFIN: 192.168.169.110
LOCAL_LAN: 192.168.8.0/22
LOCAL_LAN_OPNSENSE: 192.168.8.1
LOCAL_LAN_TRUENAS: 192.168.9.10