new cluster

auricom
2021-03-21 01:33:51 +01:00
parent 49718cb277
commit d527627d28
105 changed files with 1835 additions and 2181 deletions

View File

@@ -0,0 +1,10 @@
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
name: coredns-charts
namespace: flux-system
spec:
interval: 10m
url: https://coredns.github.io/helm
timeout: 3m
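
On its own, a HelmRepository only tells Flux where to fetch and index charts; a HelmRelease elsewhere in the repository consumes it through chart.spec.sourceRef. A minimal sketch of such a release against this source — the chart name matches what the CoreDNS project publishes at that URL, while the namespace and version here are illustrative, not taken from this commit:

---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: coredns
  namespace: kube-system      # illustrative target namespace
spec:
  interval: 5m
  chart:
    spec:
      chart: coredns          # chart published at https://coredns.github.io/helm
      version: "1.x.x"        # illustrative; pin a concrete version in practice
      sourceRef:
        kind: HelmRepository
        name: coredns-charts  # the repository defined above
        namespace: flux-system
  values: {}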

View File

@@ -48,25 +48,13 @@ spec:
- name: config
mountPath: /config
resources:
limits:
cpu: 500m
memory: 2000Mi
requests:
cpu: 100m
cpu: 500m
memory: 1500Mi
dnsConfig:
options:
- name: ndots
value: "1"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/worker
operator: In
values:
- "true"
volumes:
- name: config
emptyDir: {}
@@ -99,6 +87,29 @@ spec:
protocol: TCP
port: 80
targetPort: 9091
externalTrafficPolicy: Local
type: LoadBalancer
loadBalancerIP: 192.168.9.204
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: "nginx"
labels:
app.kubernetes.io/instance: authelia
app.kubernetes.io/name: authelia
name: authelia
namespace: auth
spec:
tls:
- hosts:
- login.k3s.xpander.ovh
rules:
- host: login.k3s.xpander.ovh
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: authelia
port:
number: 80

View File

@@ -53,19 +53,6 @@ spec:
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: bitwarden-config
namespace: data
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: freshrss-config
namespace: data
@@ -79,19 +66,6 @@ spec:
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: unifi-config
namespace: data
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: recipes-media
namespace: data

View File

@@ -74,7 +74,7 @@ spec:
port: 8000
selector:
app: archivebox
type: LoadBalancer
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress

View File

@@ -23,6 +23,7 @@ spec:
pullPolicy: IfNotPresent
env:
SIGNUPS_ALLOWED: "false"
DOMAIN: "https://bitwarden.k3s.xpander.ovh/"
bitwardenrs:
domain: ""
signupsAllowed: false
@@ -44,9 +45,16 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: bitwarden.k3s.xpander.ovh
paths:
- /
tls:
- hosts:
- "bitwarden.k3s.xpander.ovh"
persistence:
enabled: true
enabled: false
existingClaim: bitwarden-config
valuesFrom:
- kind: ConfigMap
name: helmrelease-data-bitwardenrs
- kind: ConfigMap
name: bitwardenrs-helmrelease
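
Flux merges each ConfigMap listed under valuesFrom into the chart values; unless valuesKey is set, it reads the values.yaml key. A sketch of what bitwardenrs-helmrelease could contain — the key name is Flux's default, the namespace assumes the release lives in data, and the override itself is hypothetical:

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: bitwardenrs-helmrelease
  namespace: data             # assumed: same namespace as the HelmRelease
data:
  values.yaml: |
    env:
      ADMIN_TOKEN: "changeme" # hypothetical override, merged on top of spec.values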

View File

@@ -22,6 +22,8 @@ spec:
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "v0.31.8-ls126"
env:
APP_URL: https://bookstack.k3s.xpander.ovh/
service:
port:
port: 80
@@ -33,11 +35,19 @@ spec:
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0
hosts:
- host: bookstack.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "bookstack.k3s.xpander.ovh"
persistence:
config:
enabled: true
mountPath: /config
existingClaim: bookstack-config
valuesFrom:
- kind: ConfigMap
name: helmrelease-data-bookstack
- kind: ConfigMap
name: bookstack-helmrelease

View File

@@ -27,6 +27,7 @@ spec:
env:
TZ: Europe/Paris
CRON_MIN: "18,48"
DOMAIN: "https://freshrss.k3s.xpander.ovh/"
service:
port:
port: 80
@@ -37,6 +38,14 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: freshrss.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "freshrss.k3s.xpander.ovh"
persistence:
config:
enabled: true
@@ -46,6 +55,3 @@ spec:
requests:
cpu: 50m
memory: 256Mi
valuesFrom:
- kind: ConfigMap
name: helmrelease-data-freshrss

View File

@@ -36,6 +36,17 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
hosts:
- host: homer.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "homer.k3s.xpander.ovh"
persistence:
config:
enabled: false
@@ -315,6 +326,3 @@ spec:
- name: assets-taskcafe
configMap:
name: homer-taskcafe.png
valuesFrom:
- kind: ConfigMap
name: helmrelease-data-homer

View File

@@ -37,6 +37,7 @@ spec:
prometheus.io/protocol: tcp
env:
APP_BASE_URL: https://joplin.k3s.xpander.ovh
APP_PORT: 22300
DB_CLIENT: pg
@@ -52,7 +53,15 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: joplin.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "joplin.k3s.xpander.ovh"
valuesFrom:
- kind: ConfigMap
name: helmrelease-data-joplin-server
name: joplin-server-helmrelease

View File

@@ -44,6 +44,14 @@ spec:
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/client-body-buffer-size: "10m"
hosts:
- host: recipes.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "recipes.k3s.xpander.ovh"
persistence:
config:
enabled: false
@@ -72,4 +80,4 @@ spec:
readOnly: true
valuesFrom:
- kind: ConfigMap
name: helmrelease-data-recipes
name: recipes-helmrelease

View File

@@ -218,4 +218,43 @@ spec:
app.kubernetes.io/instance: resilio-sync
app.kubernetes.io/name: resilio-sync
type: LoadBalancer
loadBalancerIP: 192.168.9.207
externalIPs:
- 192.168.169.106
externalTrafficPolicy: Local
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: "nginx"
labels:
app.kubernetes.io/instance: resilio-sync
app.kubernetes.io/name: resilio-sync
name: resilio-sync
namespace: data
spec:
rules:
- host: resilio-sync-claude.k3s.xpander.ovh
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: resilio-sync
port:
number: 8888
- host: resilio-sync-helene.k3s.xpander.ovh
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: resilio-sync
port:
number: 8889
tls:
- hosts:
- resilio-sync-claude.k3s.xpander.ovh
- resilio-sync-helene.k3s.xpander.ovh

View File

@@ -28,10 +28,22 @@ spec:
annotations:
prometheus.io/probe: "true"
prometheus.io/protocol: http
searx:
baseUrl: https://searx.k3s.xpander.ovh
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
hosts:
- host: searx.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "searx.k3s.xpander.ovh"
valuesFrom:
- kind: ConfigMap
name: helmrelease-data-searx
- kind: ConfigMap
name: searx-helmrelease

View File

@@ -17,7 +17,6 @@ spec:
namespace: flux-system
interval: 5m
values:
image:
repository: eikek0/sharry
tag: 1.6.0
@@ -38,9 +37,10 @@ spec:
prometheus.io/protocol: http
args:
- "/opt/sharry.conf"
- "/opt/sharry.conf"
sharry:
baseUrl: "https://sharry.k3s.xpander.ovh"
chunkSize: "512K"
maxSize: "1.5G"
maxValidity: "31 days"
@@ -55,13 +55,13 @@ spec:
podAnnotations: {}
additionalVolumes:
- name: sharry-config
configMap:
name: sharry-config
- name: sharry-config
configMap:
name: sharry-config
additionalVolumeMounts:
- name: sharry-config
mountPath: /opt/sharry.conf
subPath: sharry.conf
- name: sharry-config
mountPath: /opt/sharry.conf
subPath: sharry.conf
ingress:
enabled: true
@@ -71,7 +71,15 @@ spec:
# -- Nginx Proxy Body Size
nginx.ingress.kubernetes.io/proxy-body-size: "2048m"
nginx.ingress.kubernetes.io/proxy-buffering: "off"
hosts:
- host: sharry.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "sharry.k3s.xpander.ovh"
valuesFrom:
- kind: ConfigMap
name: helmrelease-data-sharry
- kind: ConfigMap
name: sharry-helmrelease

View File

@@ -80,3 +80,29 @@ spec:
app.kubernetes.io/instance: tinyfilemanager
app.kubernetes.io/name: tinyfilemanager
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: "nginx"
labels:
app.kubernetes.io/instance: tinyfilemanager
app.kubernetes.io/name: tinyfilemanager
name: tinyfilemanager
namespace: data
spec:
rules:
- host: tinyfilemanager.k3s.xpander.ovh
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: tinyfilemanager
port:
number: 80
tls:
- hosts:
- tinyfilemanager.k3s.xpander.ovh

View File

@@ -56,7 +56,15 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: vikunja.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "vikunja.k3s.xpander.ovh"
valuesFrom:
- kind: ConfigMap
name: helmrelease-data-vikunja
name: vikunja-helmrelease

View File

@@ -30,11 +30,16 @@ spec:
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
hosts:
- registry.k3s.xpander.ovh
tls:
- hosts:
- registry.k3s.xpander.ovh
service:
annotations:
prometheus.io/probe: "true"
prometheus.io/protocol: http
valuesFrom:
- kind: ConfigMap
name: "helmrelease-development-docker-registry"
name: docker-registry-helmrelease
optional: false

View File

@@ -22,6 +22,6 @@ spec:
env:
KUBERNETES_NAMESPACE: development
valuesFrom:
- kind: ConfigMap
name: "helmrelease-development-drone-kubernetes-secrets"
optional: false
- kind: ConfigMap
name: drone-kubernetes-secrets-helmrelease
optional: false

View File

@@ -30,5 +30,5 @@ spec:
- development
valuesFrom:
- kind: ConfigMap
name: "helmrelease-development-drone-runner-kube"
name: drone-runner-kube-helmrelease
optional: false

View File

@@ -25,7 +25,26 @@ spec:
persistentVolume:
enabled: true
existingClaim: drone-config
env:
DRONE_SERVER_PROTO: https
DRONE_GIT_ALWAYS_AUTH: true
DRONE_LOGS_TEXT: true
DRONE_LOGS_PRETTY: true
DRONE_LOGS_COLOR: true
DRONE_SERVER_HOST: drone.k3s.xpander.ovh
DRONE_GITEA_SERVER: https://gitea.k3s.xpander.ovh
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
prometheus.io/probe: "true"
hosts:
- host: drone.k3s.xpander.ovh
paths: ["/"]
tls:
- hosts:
- drone.k3s.xpander.ovh
valuesFrom:
- kind: ConfigMap
name: "helmrelease-development-drone"
optional: false
- kind: ConfigMap
name: drone-helmrelease
optional: false

View File

@@ -27,6 +27,11 @@ spec:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/proxy-body-size: 128m
prometheus.io/probe: "true"
hosts:
- "gitea.k3s.xpander.ovh"
tls:
- hosts:
- "gitea.k3s.xpander.ovh"
service:
http:
port: 3000
@@ -38,7 +43,7 @@ spec:
port: 22
externalTrafficPolicy: Local
externalIPs:
- 192.168.9.209
- 192.168.169.104
gitea:
config:
APP_NAME: "Homelab Gitea"
@@ -73,4 +78,4 @@ spec:
port: 11211
valuesFrom:
- kind: ConfigMap
name: helmrelease-development-gitea
name: gitea-helmrelease

View File

@@ -32,9 +32,16 @@ spec:
annotations:
kubernetes.io/ingress.class: "nginx"
prometheus.io/probe: "true"
hosts:
- host: pgadmin.k3s.xpander.ovh
paths:
- /
tls:
- hosts:
- pgadmin.k3s.xpander.ovh
persistentVolume:
enabled: true
existingClaim: pgadmin-config
valuesFrom:
- kind: ConfigMap
name: "helmrelease-development-pgadmin"
- kind: ConfigMap
name: pgadmin-helmrelease

View File

@@ -29,14 +29,18 @@ spec:
annotations:
prometheus.io/probe: "true"
prometheus.io/protocol: http
type: LoadBalancer
externalIPs:
- 192.168.9.203
externalTrafficPolicy: Local
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: hass.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "hass.k3s.xpander.ovh"
prometheus:
serviceMonitor:
enabled: false
@@ -69,6 +73,14 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: hass-config.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- hass-config.k3s.xpander.ovh
volumeMounts:
- name: config
mountPath: /config
@@ -82,4 +94,4 @@ spec:
enabled: false
valuesFrom:
- kind: ConfigMap
name: helmrelease-home-hass
name: hass-helmrelease

View File

@@ -1,4 +1,46 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: travelstories
namespace: home
labels:
app.kubernetes.io/instance: travelstories
app.kubernetes.io/name: travelstories
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: travelstories
app.kubernetes.io/name: travelstories
template:
metadata:
labels:
app.kubernetes.io/instance: travelstories
app.kubernetes.io/name: travelstories
spec:
imagePullSecrets:
- name: regcred
containers:
- name: travelstories
image: registry.k3s.xpander.ovh/homelab/travelstories:1.0.1
imagePullPolicy: Always
ports:
- containerPort: 80
name: http
volumeMounts:
- name: caddyfile
mountPath: /etc/caddy/Caddyfile
subPath: Caddyfile
volumes:
- name: caddyfile
configMap:
name: travelstories-caddyfile
dnsConfig:
options:
- name: ndots
value: "1"
---
apiVersion: v1
kind: ConfigMap
metadata:
@@ -32,3 +74,29 @@ spec:
app.kubernetes.io/instance: travelstories
app.kubernetes.io/name: travelstories
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: "nginx"
labels:
app.kubernetes.io/instance: travelstories
app.kubernetes.io/name: travelstories
name: travelstories
namespace: home
spec:
rules:
- host: travelstories.k3s.xpander.ovh
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: travelstories
port:
number: 80
tls:
- hosts:
- travelstories.k3s.xpander.ovh

View File

@@ -22,7 +22,7 @@ spec:
service:
type: LoadBalancer
externalIPs:
- 192.168.9.200
- 192.168.169.101
externalTrafficPolicy: Local
publishService:
enabled: true
@@ -48,13 +48,13 @@ spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- ingress-nginx-external
topologyKey: "kubernetes.io/hostname"
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- ingress-nginx-external
topologyKey: "kubernetes.io/hostname"
#defaultBackend:
# enabled: true
# image:

View File

@@ -1,45 +0,0 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: metallb
namespace: kube-system
spec:
interval: 5m
chart:
spec:
# renovate: registryUrl=https://charts.bitnami.com/bitnami
chart: metallb
version: 2.3.2
sourceRef:
kind: HelmRepository
name: bitnami-charts
namespace: flux-system
interval: 5m
values:
configInline:
address-pools:
- name: default
protocol: layer2
addresses:
- 192.168.9.200-192.168.9.254
controller:
image:
registry: docker.io
repository: bitnami/metallb-controller
tag: 0.9.5-debian-10-r5
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
speaker:
image:
registry: docker.io
repository: bitnami/metallb-speaker
tag: 0.9.5-debian-10-r4
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists

View File

@@ -45,6 +45,3 @@ spec:
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
host: longhorn.k3s.xpander.ovh
tls: false
valuesFrom:
- kind: ConfigMap
name: helmrelease-longhorn-system-longhorn

View File

@@ -1,19 +1,6 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: longhorn-hdd
provisioner: driver.longhorn.io
allowVolumeExpansion: true
parameters:
numberOfReplicas: "1"
staleReplicaTimeout: "2880"
fromBackup: ""
diskSelector: "hdd,slow"
nodeSelector: "storage,slow"
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: longhorn-backups
provisioner: driver.longhorn.io

View File

@@ -209,20 +209,6 @@ spec:
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: local-hdd
namespace: media
spec:
storageClassName: local-path
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Ti
volumeName: local-hdd
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: flood-config
namespace: media
@@ -246,3 +232,17 @@ spec:
resources:
requests:
storage: 2Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: qbittorrent-cache
namespace: media
spec:
accessModes:
- ReadWriteOnce
storageClassName: local
volumeName: qbittorrent-cache
resources:
requests:
storage: 600Gi

View File

@@ -33,12 +33,12 @@ spec:
enabled: true
existingClaim: bazarr-config
additionalVolumeMounts:
- name: nfs-video
mountPath: "/mnt/storage/video"
- name: nfs-video
mountPath: "/mnt/storage/video"
additionalVolumes:
- name: nfs-video
persistentVolumeClaim:
claimName: nfs-video
- name: nfs-video
persistentVolumeClaim:
claimName: nfs-video
service:
annotations:
prometheus.io/probe: "true"
@@ -49,6 +49,17 @@ spec:
cpu: 500m
limits:
memory: 1500Mi
valuesFrom:
- kind: ConfigMap
name: helmrelease-media-bazarr
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
hosts:
- host: bazarr.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "bazarr.k3s.xpander.ovh"

View File

@@ -34,6 +34,16 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
hosts:
- host: flood.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "flood.k3s.xpander.ovh"
persistence:
data:
enabled: true
@@ -50,4 +60,4 @@ spec:
prometheus.io/protocol: http
valuesFrom:
- kind: ConfigMap
name: helmrelease-media-flood
name: flood-helmrelease

View File

@@ -20,7 +20,7 @@ spec:
controllerType: deployment
image:
repository: ghcr.io/k8s-at-home/jackett
tag: v0.17.743
tag: v0.17.764
pullPolicy: IfNotPresent
env:
TZ: "Europe/Paris"
@@ -42,6 +42,17 @@ spec:
annotations:
prometheus.io/probe: "true"
prometheus.io/protocol: http
valuesFrom:
- kind: ConfigMap
name: helmrelease-media-jackett
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
hosts:
- host: jackett.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "jackett.k3s.xpander.ovh"

View File

@@ -34,28 +34,37 @@ spec:
enabled: true
existingClaim: jellyfin-config
additionalVolumeMounts:
- name: nfs-music
mountPath: "/mnt/storage/music"
- name: nfs-video
mountPath: "/mnt/storage/video"
- name: nfs-photo
mountPath: "/mnt/storage/photo"
- name: nfs-music
mountPath: "/mnt/storage/music"
- name: nfs-video
mountPath: "/mnt/storage/video"
- name: nfs-photo
mountPath: "/mnt/storage/photo"
additionalVolumes:
- name: nfs-music
persistentVolumeClaim:
claimName: nfs-music
- name: nfs-video
persistentVolumeClaim:
claimName: nfs-video
- name: nfs-photo
persistentVolumeClaim:
claimName: nfs-photo
- name: nfs-music
persistentVolumeClaim:
claimName: nfs-music
- name: nfs-video
persistentVolumeClaim:
claimName: nfs-video
- name: nfs-photo
persistentVolumeClaim:
claimName: nfs-photo
resources:
requests:
memory: 4Gi
cpu: 1
limits:
gpu.intel.com/i915: 1
valuesFrom:
- kind: ConfigMap
name: helmrelease-media-jellyfin
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: jellyfin.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "jellyfin.k3s.xpander.ovh"

View File

@@ -33,17 +33,17 @@ spec:
enabled: true
existingClaim: lidarr-config
additionalVolumeMounts:
- name: nfs-music
mountPath: "/mnt/storage/music"
- name: local-hdd
mountPath: "/downloads"
- name: nfs-music
mountPath: "/mnt/storage/music"
- name: qbittorrent-cache
mountPath: "/downloads"
additionalVolumes:
- name: nfs-music
persistentVolumeClaim:
claimName: nfs-music
- name: local-hdd
persistentVolumeClaim:
claimName: local-hdd
- name: nfs-music
persistentVolumeClaim:
claimName: nfs-music
- name: qbittorrent-cache
persistentVolumeClaim:
claimName: qbittorrent-cache
resources:
requests:
memory: 500Mi
@@ -54,15 +54,17 @@ spec:
annotations:
prometheus.io/probe: "true"
prometheus.io/protocol: http
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- k3os-worker3
valuesFrom:
- kind: ConfigMap
name: helmrelease-media-lidarr
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
hosts:
- host: lidarr.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "lidarr.k3s.xpander.ovh"

View File

@@ -33,6 +33,14 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: lychee.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "lychee.k3s.xpander.ovh"
env:
PHP_TZ: Europe/Paris
DB_CONNECTION: pgsql
@@ -54,5 +62,5 @@ spec:
mountPath: /mnt/storage/photo
existingClaim: nfs-photo
valuesFrom:
- kind: ConfigMap
name: helmrelease-media-lychee
- kind: ConfigMap
name: lychee-helmrelease

View File

@@ -33,6 +33,14 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: navidrome.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "navidrome.k3s.xpander.ovh"
env:
ND_SCANINTERVAL: 15m
ND_LOGLEVEL: info
@@ -48,6 +56,3 @@ spec:
enabled: true
mountPath: /mnt/storage/music/
existingClaim: nfs-music
valuesFrom:
- kind: ConfigMap
name: helmrelease-media-navidrome

View File

@@ -29,12 +29,12 @@ spec:
enabled: true
existingClaim: pyload-config
additionalVolumeMounts:
- name: nfs-downloads
mountPath: "/mnt/storage/downloads"
- name: nfs-downloads
mountPath: "/mnt/storage/downloads"
additionalVolumes:
- name: nfs-downloads
persistentVolumeClaim:
claimName: nfs-downloads
- name: nfs-downloads
persistentVolumeClaim:
claimName: nfs-downloads
resources:
requests:
memory: 1Gi
@@ -45,15 +45,17 @@ spec:
annotations:
prometheus.io/probe: "true"
prometheus.io/protocol: http
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- k3os-worker3
valuesFrom:
- kind: ConfigMap
name: helmrelease-media-pyload
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
hosts:
- host: pyload.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "pyload.k3s.xpander.ovh"

View File

@@ -26,17 +26,17 @@ spec:
TZ: "Europe/Paris"
service:
additionalServices:
- enabled: true
nameSuffix: bittorrent
type: LoadBalancer
externalIPs:
- 192.168.9.206
port:
port: 50413
name: bittorrent
protocol: TCP
targetPort: 6881
externalTrafficPolicy: Local
- enabled: true
nameSuffix: bittorrent
type: LoadBalancer
externalIPs:
- 192.168.169.105
port:
port: 50413
name: bittorrent
protocol: TCP
targetPort: 6881
externalTrafficPolicy: Local
service:
annotations:
prometheus.io/probe: "true"
@@ -50,21 +50,30 @@ spec:
enabled: true
existingClaim: qbittorrent-config
additionalVolumeMounts:
- name: local-hdd
mountPath: "/downloads"
- name: nfs-downloads
mountPath: "/mnt/storage/downloads"
- name: qbittorrent-cache
mountPath: "/downloads"
- name: nfs-downloads
mountPath: "/mnt/storage/downloads"
additionalVolumes:
- name: local-hdd
persistentVolumeClaim:
claimName: local-hdd
- name: nfs-downloads
persistentVolumeClaim:
claimName: nfs-downloads
- name: qbittorrent-cache
persistentVolumeClaim:
claimName: qbittorrent-cache
- name: nfs-downloads
persistentVolumeClaim:
claimName: nfs-downloads
resources:
requests:
memory: 4Gi
cpu: 500m
valuesFrom:
- kind: ConfigMap
name: helmrelease-media-qbittorrent
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: qbittorrent.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "qbittorrent.k3s.xpander.ovh"

View File

@@ -33,17 +33,17 @@ spec:
enabled: true
existingClaim: radarr-config
additionalVolumeMounts:
- name: nfs-video
mountPath: "/mnt/storage/video"
- name: local-hdd
mountPath: "/downloads"
- name: nfs-video
mountPath: "/mnt/storage/video"
- name: qbittorrent-cache
mountPath: "/downloads"
additionalVolumes:
- name: nfs-video
persistentVolumeClaim:
claimName: nfs-video
- name: local-hdd
persistentVolumeClaim:
claimName: local-hdd
- name: nfs-video
persistentVolumeClaim:
claimName: nfs-video
- name: qbittorrent-cache
persistentVolumeClaim:
claimName: qbittorrent-cache
resources:
requests:
memory: 500Mi
@@ -54,15 +54,18 @@ spec:
annotations:
prometheus.io/probe: "true"
prometheus.io/protocol: http
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- k3os-worker3
valuesFrom:
- kind: ConfigMap
name: helmrelease-media-radarr
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
prometheus.io/probe: "true"
hosts:
- host: radarr.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "radarr.k3s.xpander.ovh"

View File

@@ -36,18 +36,28 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
hosts:
- host: sonarr.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "sonarr.k3s.xpander.ovh"
additionalVolumeMounts:
- name: nfs-video
mountPath: "/mnt/storage/video"
- name: local-hdd
- name: qbittorrent-cache
mountPath: "/downloads"
additionalVolumes:
- name: nfs-video
persistentVolumeClaim:
claimName: nfs-video
- name: local-hdd
- name: qbittorrent-cache
persistentVolumeClaim:
claimName: local-hdd
claimName: qbittorrent-cache
resources:
requests:
memory: 500Mi
@@ -58,6 +68,3 @@ spec:
annotations:
prometheus.io/probe: "true"
prometheus.io/protocol: http
valuesFrom:
- kind: ConfigMap
name: helmrelease-media-sonarr

View File

@@ -17,4 +17,6 @@ spec:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
type: LoadBalancer
loadBalancerIP: 192.168.9.240
externalIPs:
- 192.168.169.110
externalTrafficPolicy: Local

View File

@@ -83,7 +83,11 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
valuesFrom:
- kind: ConfigMap
name: helmrelease-media-tdarr
hosts:
- host: tdarr.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "tdarr.k3s.xpander.ovh"

View File

@@ -17,10 +17,6 @@ spec:
namespace: flux-system
interval: 5m
values:
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
allowIcmp: true
config:
modules:
@@ -46,27 +42,62 @@ spec:
release: prometheus
interval: 2m
scrapeTimeout: 30s
targets:
- name: truenas
url: truenas
module: icmp
- name: truenas-remote
url: truenas-remote
module: icmp
- name: borgbackup
url: 192.168.9.20
module: icmp
- name: postgresql
url: postgresql
module: icmp
- name: rpizw1
url: rpizw1
module: icmp
- name: k3os-server
url: k3os-server
module: icmp
- name: k3os-worker1
url: k3os-worker1
module: icmp
- name: k3os-worker2
url: k3os-worker2
module: icmp
- name: k3os-worker3
url: k3os-worker3
module: icmp
prometheusRule:
enabled: true
additionalLabels:
app: prometheus-operator
release: prometheus
rules:
- alert: HostDown
expr: probe_success == 0
for: 10m
labels:
severity: critical
annotations:
message: The host {{"{{ $labels.target }}"}} is currently unreachable
- alert: SlowResponseTime
annotations:
message: The response time for {{"{{ $labels.target }}"}} has been greater than 30 seconds for 5 minutes.
expr: probe_duration_seconds > 30
for: 15m
labels:
severity: warning
valuesFrom:
- kind: ConfigMap
name: "helmrelease-monitoring-blackbox-exporter"
optional: false
- alert: HostDown
expr: probe_success == 0
for: 10m
labels:
severity: critical
annotations:
message: The host {{"{{ $labels.target }}"}} is currently unreachable
- alert: SlowResponseTime
annotations:
message: The response time for {{"{{ $labels.target }}"}} has been greater than 30 seconds for 5 minutes.
expr: probe_duration_seconds > 30
for: 15m
labels:
severity: warning
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
hosts:
- "blackbox.k3s.xpander.ovh"
tls:
- hosts:
- "blackbox.k3s.xpander.ovh"

View File

@@ -26,14 +26,14 @@ spec:
settings:
clustername: k3s
resources:
- name: v1/pods # Name of the resources e.g pod, deployment, ingress, etc. (Resource name must be in singular form)
- name: v1/pods # Name of the resources e.g pod, deployment, ingress, etc. (Resource name must be in singular form)
namespaces:
include:
- all
ignore: # List of namespaces to be ignored (omitempty), used only with include: all
- longhorn-system # example : include [all], ignore [x,y,z]
ignore: # List of namespaces to be ignored (omitempty), used only with include: all
- longhorn-system # example : include [all], ignore [x,y,z]
- kube-system
events: # List of lifecycle events you want to receive, e.g create, update, delete, error OR all
events: # List of lifecycle events you want to receive, e.g create, update, delete, error OR all
- create
- delete
- name: v1/services
@@ -179,5 +179,5 @@ spec:
notiftype: short
valuesFrom:
- kind: ConfigMap
name: "helmrelease-monitoring-botkube"
name: botkube-helmrelease
optional: false

View File

@@ -24,6 +24,15 @@ spec:
tag: v3.1.4
dashboard:
replicaCount: 1
valuesFrom:
- kind: ConfigMap
name: helmrelease-monitoring-goldilocks
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
hosts:
- host: goldilocks.k3s.xpander.ovh
paths: ["/"]
tls:
- hosts:
- goldilocks.k3s.xpander.ovh

View File

@@ -17,7 +17,6 @@ spec:
namespace: flux-system
interval: 5m
values:
image:
repository: linuxserver/healthchecks
tag: v1.19.0-ls79
@@ -59,7 +58,15 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
hosts:
- host: healthchecks.k3s.xpander.ovh
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "healthchecks.k3s.xpander.ovh"
valuesFrom:
- kind: ConfigMap
name: helmrelease-monitoring-healthchecks
- kind: ConfigMap
name: healthchecks-helmrelease

View File

@@ -19,7 +19,9 @@ spec:
values:
service:
type: LoadBalancer
loadBalancerIP: 192.168.9.205
externalIPs:
- 192.168.169.107
externalTrafficPolicy: Local
persistence:
enabled: true
size: 30Gi
@@ -56,4 +58,4 @@ spec:
memory: 2Gi
cpu: 100m
limits:
memory: 4Gi
memory: 4Gi

View File

@@ -28,6 +28,33 @@ spec:
prometheusOperator:
createCustomResource: true
alertmanager:
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
hosts: [alert-manager.k3s.xpander.ovh]
tls:
- hosts:
- alert-manager.k3s.xpander.ovh
config:
global:
resolve_timeout: 5m
route:
receiver: "pushover"
routes:
- match:
alertname: Watchdog
receiver: "null"
- receiver: "pushover"
inhibit_rules:
- source_match:
severity: "critical"
target_match:
severity: "warning"
# Apply inhibition if the alertname is the same.
equal: ["alertname", "namespace"]
alertmanagerSpec:
storage:
volumeClaimTemplate:
@@ -39,19 +66,19 @@ spec:
nodeExporter:
serviceMonitor:
relabelings:
- action: replace
regex: (.*)
replacement: $1
sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: kubernetes_node
- action: replace
regex: (.*)
replacement: $1
sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: kubernetes_node
kubelet:
serviceMonitor:
metricRelabelings:
- action: replace
sourceLabels:
- node
targetLabel: instance
- action: replace
sourceLabels:
- node
targetLabel: instance
grafana:
dashboards:
default:
@@ -76,23 +103,23 @@ spec:
GF_DISABLE_SANITIZE_HTML: true
GF_PANELS_DISABLE_SANITIZE_HTML: true
plugins:
- natel-discrete-panel
- pr0ps-trackmap-panel
- grafana-piechart-panel
- vonage-status-panel
- https://github.com/panodata/grafana-map-panel/releases/download/0.9.0/grafana-map-panel-0.9.0.zip;grafana-worldmap-panel-ng
- natel-discrete-panel
- pr0ps-trackmap-panel
- grafana-piechart-panel
- vonage-status-panel
- https://github.com/panodata/grafana-map-panel/releases/download/0.9.0/grafana-map-panel-0.9.0.zip;grafana-worldmap-panel-ng
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/default
- name: "default"
orgId: 1
folder: ""
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/default
sidecar:
datasources:
enabled: true
@@ -101,38 +128,40 @@ spec:
enabled: true
searchNamespace: ALL
additionalDataSources:
- name: Prometheus
type: prometheus
access: proxy
url: http://thanos-query-http:10902/
isDefault: true
- name: loki
type: loki
access: proxy
url: http://loki.logging.svc.cluster.local:3100/
- name: influxdb-pfsense
type: influxdb
acces: server
url: http://influxdb:8086/
database: pfsense
user: pfsense
- name: influxdb-rpi-os
type: influxdb
acces: server
url: http://influxdb:8086/
database: rpi-os
user: rpi-os
- name: influxdb-graphite
type: influxdb
database: graphite
acces: server
url: http://influxdb:8086/
- name: influxdb-home_assistant
type: influxdb
acces: server
url: http://influxdb:8086/
database: home_assistant
- name: Prometheus
type: prometheus
access: proxy
url: http://thanos-query-http:10902/
isDefault: true
- name: loki
type: loki
access: proxy
url: http://loki:3100/
- name: influxdb-pfsense
type: influxdb
acces: server
url: http://influxdb:8086/
database: pfsense
user: pfsense
- name: influxdb-rpi-os
type: influxdb
acces: server
url: http://influxdb:8086/
database: rpi-os
user: rpi-os
- name: influxdb-graphite
type: influxdb
database: graphite
acces: server
url: http://influxdb:8086/
- name: influxdb-home_assistant
type: influxdb
acces: server
url: http://influxdb:8086/
database: home_assistant
grafana.ini:
server:
root_url: https://grafana.k3s.xpander.ovh
paths:
data: /var/lib/grafana/data
logs: /var/log/grafana
@@ -146,6 +175,16 @@ spec:
url: https://grafana.net
smtp:
enabled: false
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
hosts: [grafana.k3s.xpander.ovh]
tls:
- hosts:
- grafana.k3s.xpander.ovh
kubeEtcd:
enabled: false
kubeControllerManager:
@@ -155,6 +194,16 @@ spec:
kubeProxy:
enabled: false
prometheus:
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
hosts: [prometheus.k3s.xpander.ovh]
tls:
- hosts:
- prometheus.k3s.xpander.ovh
prometheusSpec:
replicas: 2
replicaExternalLabelName: "replica"
@@ -183,90 +232,100 @@ spec:
name: thanos
key: object-store.yaml
additionalScrapeConfigs:
# Example scrape config for probing ingresses via the Blackbox Exporter.
#
# The relabeling allows the actual ingress scrape endpoint to be configured
# via the following annotations:
#
# * `prometheus.io/probe`: Only probe ingresses that have a value of `true`
- job_name: 'kubernetes-ingresses'
metrics_path: /probe
scrape_interval: 60s
params:
module: [http_2xx]
kubernetes_sd_configs:
- role: ingress
relabel_configs:
- source_labels: [__meta_kubernetes_ingress_annotation_prometheus_io_probe]
action: keep
regex: true
- source_labels: [__meta_kubernetes_ingress_scheme, __address__, __meta_kubernetes_ingress_path]
regex: (.+);(.+);(.+)
replacement: ${1}://${2}${3}
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter-prometheus-blackbox-exporter:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_ingress_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_ingress_name]
target_label: kubernetes_name
- job_name: 'kubernetes-services-http'
metrics_path: /probe
scrape_interval: 60s
params:
module: [http_2xx]
kubernetes_sd_configs:
- role: service
relabel_configs:
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
action: keep
regex: true
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_protocol]
action: keep
regex: http
- source_labels: [__address__]
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter-prometheus-blackbox-exporter:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_service_name]
target_label: kubernetes_name
- job_name: 'kubernetes-services-tcp'
metrics_path: /probe
scrape_interval: 60s
params:
module: [tcp_connect]
kubernetes_sd_configs:
- role: service
relabel_configs:
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
action: keep
regex: true
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_protocol]
action: keep
regex: tcp
- source_labels: [__address__]
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter-prometheus-blackbox-exporter:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_service_name]
target_label: kubernetes_name
# Example scrape config for probing ingresses via the Blackbox Exporter.
#
# The relabeling allows the actual ingress scrape endpoint to be configured
# via the following annotations:
#
# * `prometheus.io/probe`: Only probe ingresses that have a value of `true`
- job_name: "kubernetes-ingresses"
metrics_path: /probe
scrape_interval: 60s
params:
module: [http_2xx]
kubernetes_sd_configs:
- role: ingress
relabel_configs:
- source_labels:
[__meta_kubernetes_ingress_annotation_prometheus_io_probe]
action: keep
regex: true
- source_labels:
[
__meta_kubernetes_ingress_scheme,
__address__,
__meta_kubernetes_ingress_path,
]
regex: (.+);(.+);(.+)
replacement: ${1}://${2}${3}
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter-prometheus-blackbox-exporter:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_ingress_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_ingress_name]
target_label: kubernetes_name
- job_name: "kubernetes-services-http"
metrics_path: /probe
scrape_interval: 60s
params:
module: [http_2xx]
kubernetes_sd_configs:
- role: service
relabel_configs:
- source_labels:
[__meta_kubernetes_service_annotation_prometheus_io_probe]
action: keep
regex: true
- source_labels:
[__meta_kubernetes_service_annotation_prometheus_io_protocol]
action: keep
regex: http
- source_labels: [__address__]
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter-prometheus-blackbox-exporter:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_service_name]
target_label: kubernetes_name
- job_name: "kubernetes-services-tcp"
metrics_path: /probe
scrape_interval: 60s
params:
module: [tcp_connect]
kubernetes_sd_configs:
- role: service
relabel_configs:
- source_labels:
[__meta_kubernetes_service_annotation_prometheus_io_probe]
action: keep
regex: true
- source_labels:
[__meta_kubernetes_service_annotation_prometheus_io_protocol]
action: keep
regex: tcp
- source_labels: [__address__]
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter-prometheus-blackbox-exporter:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_service_name]
target_label: kubernetes_name
valuesFrom:
- kind: ConfigMap
name: helmrelease-monitoring-prometheus-stack
- kind: ConfigMap
name: prometheus-stack-helmrelease
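
The Alertmanager route earlier in this file sends everything to a pushover receiver (with the Watchdog alert silenced via a null receiver), but the receiver definitions themselves do not appear in this hunk — presumably they arrive through the merged prometheus-stack-helmrelease values. A hypothetical sketch of the missing block, following Alertmanager's standard pushover_configs schema:

alertmanager:
  config:
    receivers:
      - name: "null"
      - name: "pushover"
        pushover_configs:
          - user_key: "<user key>"         # hypothetical; real credentials would live in the referenced ConfigMap
            token: "<application token>"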

View File

@@ -21,7 +21,8 @@ spec:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
valuesFrom:
- kind: ConfigMap
name: "helmrelease-monitoring-kubernetes-dashboard"
optional: false
hosts: ["kubernetes-dashboard.k3s.xpander.ovh"]
paths: ["/"]
tls:
- hosts:
- "kubernetes-dashboard.k3s.xpander.ovh"

View File

@@ -46,19 +46,19 @@ spec:
# You can use a headless k8s service for all distributor,
# ingester and querier components.
join_members:
- loki-stack-headless:7946
- loki-stack-headless:7946
# max_join_backoff: 1m
# max_join_retries: 10
# min_join_backoff: 1s
schema_config:
configs:
- from: 2020-05-15
store: boltdb-shipper
object_store: s3
schema: v11
index:
prefix: index_
period: 24h
- from: 2020-05-15
store: boltdb-shipper
object_store: s3
schema: v11
index:
prefix: index_
period: 24h
storage_config:
boltdb_shipper:
active_index_directory: /data/loki/index
@@ -70,10 +70,10 @@ spec:
reject_old_samples: true
reject_old_samples_max_age: 168h
extraPorts:
- port: 7956
protocol: TCP
name: loki-gossip-ring
targetPort: 7946
- port: 7956
protocol: TCP
name: loki-gossip-ring
targetPort: 7946
serviceMonitor:
enabled: true
podAnnotations:
@@ -143,7 +143,9 @@ spec:
enabled: true
type: LoadBalancer
port: 1514
loadBalancerIP: 192.168.9.208
externalIPs:
- 192.168.169.109
externalTrafficPolicy: Local
valuesFrom:
- kind: ConfigMap
name: helmrelease-monitoring-loki-stack
- kind: ConfigMap
name: loki-stack-helmrelease

View File

@@ -24,35 +24,36 @@ spec:
app.kubernetes.io/name: syslog-ng
spec:
containers:
- image: balabit/syslog-ng:3.29.1
imagePullPolicy: Always
name: syslog-ng
# securityContext:
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1001
ports:
- containerPort: 514
name: pfsense-syslog
volumeMounts:
- name: config
mountPath: /etc/syslog-ng/syslog-ng.conf
subPath: syslog-ng.conf
livenessProbe:
exec:
command:
- cat
- image: balabit/syslog-ng:3.29.1
imagePullPolicy: Always
name: syslog-ng
# securityContext:
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1001
ports:
- containerPort: 514
name: pfsense-syslog
volumeMounts:
- name: config
mountPath: /etc/syslog-ng/syslog-ng.conf
subPath: syslog-ng.conf
livenessProbe:
exec:
command:
- cat
volumes:
- name: config
configMap:
name: syslog-ng-config
- name: config
configMap:
name: syslog-ng-config
dnsConfig:
options:
- name: ndots
value: "1"
- name: ndots
value: "1"
---
apiVersion: v1
kind: ConfigMap
@@ -89,12 +90,14 @@ metadata:
namespace: monitoring
spec:
ports:
- name: pfsense-syslog
port: 514
protocol: UDP
targetPort: 514
- name: pfsense-syslog
port: 514
protocol: UDP
targetPort: 514
selector:
app.kubernetes.io/instance: syslog-ng
app.kubernetes.io/name: syslog-ng
type: LoadBalancer
loadBalancerIP: 192.168.9.202
externalIPs:
- 192.168.169.108
externalTrafficPolicy: Local

View File

@@ -18,6 +18,16 @@ spec:
interval: 5m
values:
query:
http:
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.auth.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.k3s.xpander.ovh/"
hosts: ["thanos.k3s.xpander.ovh"]
tls:
- hosts: ["thanos.k3s.xpander.ovh"]
replicaCount: 3
replicaLabels:
- replica
@@ -37,5 +47,5 @@ spec:
enable: false
part_size: 0
valuesFrom:
- kind: ConfigMap
name: helmrelease-monitoring-thanos
- kind: ConfigMap
name: thanos-helmrelease

View File

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: network
labels:
goldilocks.fairwinds.com/enabled: "true"

View File

@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: unifi-config
namespace: network
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn-backups
resources:
requests:
storage: 10Gi

View File

@@ -0,0 +1,129 @@
# see: https://github.com/ori-edge/k8s_gateway/blob/master/examples/install-clusterwide.yml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: excoredns
namespace: network
---
# Source: coredns/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: excoredns
namespace: network
data:
Corefile: |-
.:53 {
errors
log
ready
k8s_gateway k3s.xpander.ovh
{
resources Ingress Service
ttl 10
apex dns1
}
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
---
# Source: coredns/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: excoredns
rules:
- apiGroups:
- ""
resources:
- services
- namespaces
verbs:
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- list
- watch
---
# Source: coredns/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: excoredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: excoredns
subjects:
- kind: ServiceAccount
name: excoredns
namespace: network
---
apiVersion: v1
kind: Service
metadata:
name: external-dns
namespace: network
spec:
selector:
k8s-app: "excoredns"
ports:
- name: udp-53
port: 53
protocol: UDP
type: LoadBalancer
externalIPs:
- 192.168.169.100
externalTrafficPolicy: Local
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: excoredns
namespace: network
spec:
replicas: 1
selector:
matchLabels:
k8s-app: "excoredns"
template:
metadata:
labels:
k8s-app: "excoredns"
spec:
serviceAccountName: excoredns
dnsPolicy: ClusterFirst
containers:
- name: "coredns"
image: "quay.io/oriedge/k8s_gateway:v0.1.4"
imagePullPolicy: IfNotPresent
args: ["-conf", "/etc/coredns/Corefile"]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
ports:
- { containerPort: 53, protocol: UDP, name: udp-53 }
- { containerPort: 53, protocol: TCP, name: tcp-53 }
volumes:
- name: config-volume
configMap:
name: excoredns
items:
- key: Corefile
path: Corefile

View File

@@ -3,7 +3,7 @@ apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: unifi
namespace: data
namespace: network
spec:
interval: 5m
chart:
@@ -35,34 +35,44 @@ spec:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
nginx.ingress.kubernetes.io/proxy-body-size: 10m
hosts:
- unifi.k3s.xpander.ovh
tls:
- hosts:
- unifi.k3s.xpander.ovh
guiService:
type: LoadBalancer
loadBalancerIP: 192.168.9.201
externalIPs:
- 192.168.169.103
externalTrafficPolicy: Local
annotations:
metallb.universe.tf/allow-shared-ip: unifi
prometheus.io/probe: "true"
prometheus.io/protocol: tcp
controllerService:
type: LoadBalancer
loadBalancerIP: 192.168.9.201
externalIPs:
- 192.168.169.103
externalTrafficPolicy: Local
annotations:
metallb.universe.tf/allow-shared-ip: unifi
prometheus.io/probe: "true"
prometheus.io/protocol: tcp
stunService:
type: LoadBalancer
loadBalancerIP: 192.168.9.201
externalIPs:
- 192.168.169.103
externalTrafficPolicy: Local
annotations:
metallb.universe.tf/allow-shared-ip: unifi
discoveryService:
type: LoadBalancer
loadBalancerIP: 192.168.9.201
externalIPs:
- 192.168.169.103
externalTrafficPolicy: Local
annotations:
metallb.universe.tf/allow-shared-ip: unifi
resources:
requests:
memory: 2Gi
cpu: 100m
valuesFrom:
- kind: ConfigMap
name: helmrelease-data-unifi

View File

@@ -167,3 +167,26 @@ spec:
nfs:
server: truenas
path: "/mnt/storage/home/helene"
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: qbittorrent-cache
spec:
capacity:
storage: 600Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
storageClassName: local
local:
path: /mnt/ssd1/qbittorrent
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- k3s-worker3