New GitOps template

This commit is contained in:
auricom
2021-04-13 10:34:08 +02:00
parent 67c4d6a855
commit a95f32b44d
335 changed files with 3131 additions and 3650 deletions

View File

@@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# renovate: registryUrl=https://charts.jetstack.io chart=cert-manager
- https://github.com/jetstack/cert-manager/releases/download/v1.3.0/cert-manager.crds.yaml

View File

@@ -0,0 +1,54 @@
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
name: kube-prometheus-stack-source
namespace: flux-system
spec:
interval: 30m
url: https://github.com/prometheus-community/helm-charts.git
ref:
# renovate: registryUrl=https://prometheus-community.github.io/helm-charts
tag: kube-prometheus-stack-14.6.2
ignore: |
# exclude all
/*
# include only the chart's crds directory
!/charts/kube-prometheus-stack/crds
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
name: kube-prometheus-stack-crds
namespace: flux-system
spec:
interval: 15m
prune: false
sourceRef:
kind: GitRepository
name: kube-prometheus-stack-source
healthChecks:
- apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: alertmanagerconfigs.monitoring.coreos.com
- apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: alertmanagers.monitoring.coreos.com
- apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: podmonitors.monitoring.coreos.com
- apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: probes.monitoring.coreos.com
- apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: prometheuses.monitoring.coreos.com
- apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: prometheusrules.monitoring.coreos.com
- apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: servicemonitors.monitoring.coreos.com
- apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: thanosrulers.monitoring.coreos.com
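Because these CRDs are applied straight from the Git source, the chart release that consumes them should leave CRD handling to this Kustomization. A minimal sketch of such a HelmRelease, assuming it lives elsewhere in the repo, that a prometheus-community HelmRepository source exists, and that the helm-controller in use supports the install/upgrade crds policy:

---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: kube-prometheus-stack
  namespace: monitoring
spec:
  interval: 5m
  chart:
    spec:
      chart: kube-prometheus-stack
      version: 14.6.2
      sourceRef:
        kind: HelmRepository
        name: prometheus-community-charts   # assumed source name
        namespace: flux-system
  install:
    crds: Skip   # CRDs are owned by the kube-prometheus-stack-crds Kustomization above
  upgrade:
    crds: Skip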

View File

@@ -0,0 +1,5 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- crd.yaml

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- cert-manager
- kube-prometheus-stack

View File

@@ -0,0 +1,41 @@
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
name: cert-manager-webhook-ovh
namespace: flux-system
spec:
interval: 1440m
url: https://github.com/baarde/cert-manager-webhook-ovh
ref:
branch: master
ignore: |
# exclude all
/*
# include the deploy directory (contains the chart)
!/deploy/
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cert-manager-webhook-ovh:secret-reader
namespace: cert-manager
rules:
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["ovh-credentials"]
verbs: ["get", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cert-manager-webhook-ovh:secret-reader
namespace: cert-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cert-manager-webhook-ovh:secret-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager-webhook-ovh
namespace: cert-manager

View File

@@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- cert-manager-webhook-ovh.yaml
- letsencrypt-production.yaml
- letsencrypt-staging.yaml
- secret.enc.yaml

View File

@@ -0,0 +1,23 @@
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
server: https://acme-staging-v02.api.letsencrypt.org/directory
email: "${SECRET_CLUSTER_DOMAIN_EMAIL}"
privateKeySecretRef:
name: letsencrypt-staging
solvers:
- dns01:
webhook:
groupName: "${SECRET_CLUSTER_DOMAIN_ROOT}"
solverName: ovh
config:
endpoint: ovh-eu
applicationKey: "${SECRET_CLUSTER_OVH_APPLICATION_KEY}"
applicationSecretRef:
key: applicationSecret
name: ovh-credentials
consumerKey: "${SECRET_CLUSTER_OVH_CONSUMER_KEY}"

View File

@@ -0,0 +1,23 @@
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-production
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: "${SECRET_CLUSTER_DOMAIN_EMAIL}"
privateKeySecretRef:
name: letsencrypt-production
solvers:
- dns01:
webhook:
groupName: "${SECRET_CLUSTER_DOMAIN_ROOT}"
solverName: ovh
config:
endpoint: ovh-eu
applicationKey: "${SECRET_CLUSTER_OVH_APPLICATION_KEY}"
applicationSecretRef:
key: applicationSecret
name: ovh-credentials
consumerKey: "${SECRET_CLUSTER_OVH_CONSUMER_KEY}"
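With both issuers in place, a certificate is requested declaratively and stored in the named secret. A minimal sketch (the certificate name, namespace, and host are placeholders, not part of this commit):

---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: example-tls        # placeholder
  namespace: networking    # placeholder
spec:
  secretName: example-tls
  dnsNames:
    - "example.${SECRET_CLUSTER_DOMAIN}"
  issuerRef:
    kind: ClusterIssuer
    name: letsencrypt-production   # use letsencrypt-staging while validating the DNS-01 webhook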

View File

@@ -0,0 +1,36 @@
kind: Secret
apiVersion: v1
metadata:
name: ovh-credentials
namespace: cert-manager
data:
applicationSecret: ENC[AES256_GCM,data:DSYSki4dpDJn2E1lRyRo1G6/atfacDPn6LyfM4hKQhKau/jDIjztarH280o=,iv:la3Vt+2U5gO3DqXb/NsVzDOsgckNw9SCTsv/jGtOZ4w=,tag:qjk4qYl/eqhbBd+ayxRKZw==,type:str]
type: Opaque
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
lastmodified: "2021-04-14T13:49:54Z"
mac: ENC[AES256_GCM,data:v5c+yR/9uKX9ncNZNS92mVTYmKnusJdZ2O8osR5RjiHPx3GKeDsIkd5/gPxsFD3BWS2LDyVKBVPBxGMkCiIawDrUgmUjVDYuyXIDbn+ui9lfCEYwCCjxz5KkOotkPij6nXRQ9t0UsBmf/RhG1TN7rKVPwBEwt00kZYDh8BEMbZI=,iv:ZH3iFdaW0Q1f5qwkVZjTxI5xdX2aIjO9Dx6NdNxzlww=,tag:10W+kZE/ZAR4bqebPs6Gfg==,type:str]
pgp:
- created_at: "2021-04-14T13:49:54Z"
enc: |
-----BEGIN PGP MESSAGE-----
hQGMA/JorPHm1g9XAQv+OfbvF0TxlCEbp7Kb45tSpIt0UzX/ae9KBGPCKAxgXAjd
czsjIX0NH1n4YINNKuHIunm18FoAJUOtRyWWaJDwW8R0z/O24yq/E7bfXn4RNVsE
lFwRKtF3zPtQNFTLITPqzmINEeZsFobcV2l+gfXW6lieHWo937YYSxNYLyWrso1t
snNyjdAKWlckv9xr7ZLK8UftQrdwa7D1Ig+W/6xxor5z6IiaJUPeHGDuTJ7nSsWz
3LaqeGeW6a5zgL8JzMzhd9xHSGqaS2vGEGeNyMIFf466qUspQDLSXq8/a0YYFWb/
CsmySgm3RqYu6o+WvLEAnsIKKRISHsCUMnArshmCpnvJ6q1hIPxyJg/dX4hzBFau
MS2Ma3WD3WD2edS3uSVApJ9RDc2lLJDXQ4qEDgOeok1StOE4ANfTyP1QYS4yYHue
VZVZyCvsrsxbC2GQWQK0RRki/WY2p+V7lCa/ropDa6WcHCq2agfbQ769J3erMYii
b1efKs2vpf0HLrnnK+IF0lwBSCjz9ffqGq7+OP5Aj6uXV+E6R4kUzgn2KvIiLrot
U87wpXcyYL9J5hyzVWKS0S//kKbCqapPFia9vuxPMh4GgF+i1xshCTqqHzJfKzK4
G/YLCKCae2PnX/rkRA==
=Sv3h
-----END PGP MESSAGE-----
fp: C8F8A49D04A1AB639F8EA21CDBA4B1DCB1FA5BDD
encrypted_regex: ^(data|stringData)$
version: 3.6.1
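The sops metadata above implies a repository-level .sops.yaml creation rule with the same PGP fingerprint and encrypted_regex. A minimal sketch of that rule (the path_regex is an assumption based on the .enc.yaml naming used here):

creation_rules:
  - path_regex: .*\.enc\.yaml$       # assumption: encrypted manifests end in .enc.yaml
    encrypted_regex: ^(data|stringData)$
    pgp: C8F8A49D04A1AB639F8EA21CDBA4B1DCB1FA5BDD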

View File

@@ -0,0 +1,35 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: descheduler
namespace: kube-system
spec:
interval: 5m
chart:
spec:
# renovate: registryUrl=https://kubernetes-sigs.github.io/descheduler
chart: descheduler-helm-chart
version: 0.19.1
sourceRef:
kind: HelmRepository
name: kubernetes-sigs-descheduler-charts
namespace: flux-system
interval: 5m
values:
schedule: "*/15 * * * *"
podAnnotations:
botkube.io/disable: "true"
deschedulerPolicy:
strategies:
RemoveDuplicates:
enabled: false
RemovePodsViolatingNodeAffinity:
enabled: true
params:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
RemovePodsViolatingInterPodAntiAffinity:
enabled: false
LowNodeUtilization:
enabled: false

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- notifications

View File

@@ -0,0 +1,58 @@
---
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Alert
metadata:
name: k3s-gitops
namespace: flux-system
spec:
providerRef:
name: discord
eventSeverity: info
eventSources:
- kind: GitRepository
name: "*"
- kind: Kustomization
name: "*"
suspend: false
---
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Alert
metadata:
name: helmreleases
namespace: flux-system
spec:
providerRef:
name: discord
eventSeverity: error
eventSources:
- kind: HelmRelease
namespace: "auth"
name: "*"
- kind: HelmRelease
namespace: "data"
name: "*"
- kind: HelmRelease
namespace: "development"
name: "*"
- kind: HelmRelease
namespace: "flux-system"
name: "*"
- kind: HelmRelease
namespace: "home"
name: "*"
- kind: HelmRelease
namespace: "kube-system"
name: "*"
- kind: HelmRelease
namespace: "longhorn-system"
name: "*"
- kind: HelmRelease
namespace: "media"
name: "*"
- kind: HelmRelease
namespace: "monitoring"
name: "*"
- kind: HelmRelease
namespace: "network"
name: "*"
suspend: false

View File

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- alerts.yaml
- provider.yaml
- secret.enc.yaml

View File

@@ -0,0 +1,12 @@
---
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Provider
metadata:
name: discord
namespace: flux-system
spec:
type: discord
username: flux-bot
channel: flux
secretRef:
name: discord-url
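The discord Provider reads the webhook URL from the address key of the referenced secret; the encrypted manifest that follows carries exactly that key. Before encryption it would look roughly like this (the webhook URL is a placeholder):

---
apiVersion: v1
kind: Secret
metadata:
  name: discord-url
  namespace: flux-system
stringData:
  address: https://discord.com/api/webhooks/<id>/<token>   # placeholder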

View File

@@ -0,0 +1,36 @@
kind: Secret
apiVersion: v1
metadata:
name: discord-url
namespace: flux-system
data:
address: ENC[AES256_GCM,data:sejmGw4AyvTseb/yStzMG+XDBfrPbPh9pXcgOKuwJOWkWNnT4oO0cnFgA29jw58/BgPJzT9yqyyUG9KHsMwpo1JVPE2VxPsvQy0Zos5OMV03kMPvGtc8EkMWKA5mVKg+seSR2rlhx3NYUa/JOQlB5P3P1jHurkzAGmUnjY+FC0AS0CAq2KIijJT+9rw/tEWAJ8ShPciK8mP+zLxXwxE7Tw==,iv:o3VtRLaWfNdpPpT1z5dPNdF20/UBSWzLc+Zcjt4bJl4=,tag:A2vbm9MJiOKQ26xAfUQG4g==,type:str]
type: Opaque
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
lastmodified: "2021-04-14T14:13:49Z"
mac: ENC[AES256_GCM,data:i+Zv7gnXE8Wk+ZgJjF6Fmfvoiw2L+BF9Qm5NLdJyOduHzBe8a3U6FwjMyE+K1DRhKbVRJY3hQrFoP2J5MwtRMRoDJJSIM/80OJxhq5ULnWKrKdSDFQ8qT8XWkTfds7nTxaQ42iXKLFhdjlaI6Nei2fjF5zF+kb1YDJKQQ6O6Lw0=,iv:uwwHzQtQ5aoZnD09a+Adc67Nfv1OuaukfwpinaVMWMM=,tag:ycSSKQcTIbvZlYazJM76Ow==,type:str]
pgp:
- created_at: "2021-04-14T14:13:49Z"
enc: |
-----BEGIN PGP MESSAGE-----
hQGMA/JorPHm1g9XAQv+PyY7WRiphSXYrLDGaJ94aT778ZlLVrcMcMvhHbc62eTT
KZTM3dDrb/ObYyyv1C4EPMbBiccrmiIQWIeJNcdKV5AJnHdD+uPrJikdx5mWndNQ
sK6b2IfMhIQm1zOj37feftLNTU6a+M10yCUFFFZgynxsC8QhH8mJ8fMZEGnPaf2r
/mXPRToVwkQ9GEjgywdLcvSB/2rmTwCAlgnj2uBqAk5ke09l1nZtWMa/iutz+vDm
vugsX8OikmaI2vtC3oLSgGoPV8Gjvb1rAgkIw7uEEhe253+utafa4HKgMhWRDooK
p7SXgF798WwP6GflvQThnSkCBUL2YQOpUTWL5i4oxc3Wgk1gbrtA5WUAhtRh6hby
pdVzQsZdHUKZBemcgid9SS8QVEC3bg/IhACDdSb14Sgd4H0lQ0BhYf10S5nPkvPG
hAW++GiGaatVO/mJi/sMKK3Hcn1kBosgIhBEkoHXLGWwOxc4OVbmG9xwlX2nziTh
IKMeX8nc+PkG8OF3RouV0lwBip1b5QVQ6q3okAF7zK5Su8n7L/qJIidGnIwd1zVg
16Iz4HcCayjCr1YtJo+aofRdZXuZe8n7a5So7V5v2ImuJk/U2Boonz9v4xwGBSDv
R7iroDbzS4+Hevciww==
=oS8F
-----END PGP MESSAGE-----
fp: C8F8A49D04A1AB639F8EA21CDBA4B1DCB1FA5BDD
encrypted_regex: ^(data|stringData)$
version: 3.6.1

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- discord

View File

@@ -0,0 +1,33 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: intel-gpu-plugin
namespace: kube-system
spec:
interval: 5m
chart:
spec:
# renovate: registryUrl=https://k8s-at-home.com/charts/
chart: intel-gpu-plugin
version: 1.2.3
sourceRef:
kind: HelmRepository
name: k8s-at-home-charts
namespace: flux-system
interval: 5m
values:
controllerType: deployment
image:
repository: intel/intel-gpu-plugin
tag: 0.20.0
pullPolicy: IfNotPresent
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: feature.node.kubernetes.io/custom-intel-gpu
operator: In
values:
- "true"

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml

View File

@@ -0,0 +1,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- cert-manager
- descheduler
- flux
- intel-gpu-plugin
- longhorn-system
- node-feature-discovery
- system-upgrade

View File

@@ -0,0 +1,41 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: longhorn
namespace: longhorn-system
spec:
interval: 5m
chart:
spec:
# renovate: registryUrl=https://charts.longhorn.io
chart: longhorn
version: 1.1.0
sourceRef:
kind: HelmRepository
name: longhorn-charts
namespace: flux-system
interval: 5m
values:
defaultSettings:
backupTarget: s3://longhorn@us-east-1/
backupTargetCredentialSecret: minio-truenas-credentials
createDefaultDiskLabeledNodes: true
defaultDataPath: /var/lib/longhorn/
replicaSoftAntiAffinity: false
storageOverProvisioningPercentage: 300
storageMinimalAvailablePercentage: 25
upgradeChecker: true
defaultReplicaCount: 3
guaranteedEngineCPU: 0.25
defaultLonghornStaticStorageClass: longhorn-backups
backupstorePollInterval: 10800
autoSalvage: true
disableSchedulingOnCordonedNode: true
replicaZoneSoftAntiAffinity: true
volumeAttachmentRecoveryPolicy: wait
csi:
kubeletRootDir: /var/lib/kubelet
tls: true
ingress:
enabled: false

View File

@@ -0,0 +1,26 @@
---
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
name: longhorn-ui
namespace: longhorn-system
annotations:
kubernetes.io/ingress.class: "nginx"
ingress.kubernetes.io/secure-backends: "true"
nginx.ingress.kubernetes.io/auth-url: "http://authelia.networking.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.${SECRET_CLUSTER_DOMAIN_CERT}/"
spec:
tls:
- hosts:
- longhorn.${SECRET_CLUSTER_DOMAIN}
rules:
- host: longhorn.${SECRET_CLUSTER_DOMAIN}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: longhorn-frontend
port:
number: 80

View File

@@ -0,0 +1,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml
- ingress.yaml
- monitoring.yaml
- storageclass.yaml
- secret.enc.yaml

View File

@@ -0,0 +1,109 @@
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: longhorn-prometheus-servicemonitor
namespace: longhorn-system
labels:
name: longhorn-prometheus-servicemonitor
spec:
selector:
matchLabels:
app: longhorn-manager
namespaceSelector:
matchNames:
- longhorn-system
endpoints:
- port: manager
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
labels:
prometheus: longhorn
role: alert-rules
name: prometheus-longhorn-rules
namespace: monitoring
spec:
groups:
- name: longhorn.rules
rules:
#- alert: LonghornVolumeActualSpaceUsedWarning
# annotations:
# description: The actual space used by Longhorn volume {{$labels.volume}} on {{$labels.node}} is at {{$value}}% capacity for
# more than 5 minutes.
# summary: The actual used space of Longhorn volume is over 90% of the capacity.
# expr: (longhorn_volume_actual_size_bytes / longhorn_volume_capacity_bytes) * 100 > 90
# for: 5m
# labels:
# issue: The actual used space of Longhorn volume {{$labels.volume}} on {{$labels.node}} is high.
# severity: warning
- alert: LonghornVolumeStatusCritical
annotations:
description: Longhorn volume {{$labels.volume}} on {{$labels.node}} is Fault for
more than 5 minutes.
summary: Longhorn volume {{$labels.volume}} is Fault
expr: longhorn_volume_robustness == 3
for: 5m
labels:
issue: Longhorn volume {{$labels.volume}} is Fault.
severity: critical
- alert: LonghornVolumeStatusWarning
annotations:
description: Longhorn volume {{$labels.volume}} on {{$labels.node}} is Degraded for
more than 5 minutes.
summary: Longhorn volume {{$labels.volume}} is Degraded
expr: longhorn_volume_robustness == 2
for: 5m
labels:
issue: Longhorn volume {{$labels.volume}} is Degraded.
severity: warning
- alert: LonghornNodeStorageWarning
annotations:
description: The used storage of node {{$labels.node}} is at {{$value}}% capacity for
more than 5 minutes.
summary: The used storage of node is over 70% of the capacity.
expr: (longhorn_node_storage_usage_bytes / longhorn_node_storage_capacity_bytes) * 100 > 70
for: 5m
labels:
issue: The used storage of node {{$labels.node}} is high.
severity: warning
- alert: LonghornDiskStorageWarning
annotations:
description: The used storage of disk {{$labels.disk}} on node {{$labels.node}} is at {{$value}}% capacity for
more than 5 minutes.
summary: The used storage of disk is over 70% of the capacity.
expr: (longhorn_disk_usage_bytes / longhorn_disk_capacity_bytes) * 100 > 70
for: 5m
labels:
issue: The used storage of disk {{$labels.disk}} on node {{$labels.node}} is high.
severity: warning
- alert: LonghornNodeDown
annotations:
description: There are {{$value}} Longhorn nodes which have been offline for more than 5 minutes.
summary: Longhorn nodes are offline
expr: longhorn_node_total - (count(longhorn_node_status{condition="ready"}==1) OR on() vector(0))
for: 5m
labels:
issue: There are {{$value}} Longhorn nodes offline.
severity: critical
- alert: LonghornInstanceManagerCPUUsageWarning
annotations:
description: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} has had CPU usage at {{$value}}% of its CPU request for
more than 5 minutes.
summary: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} CPU usage is over 300% of its CPU request.
expr: (longhorn_instance_manager_cpu_usage_millicpu/longhorn_instance_manager_cpu_requests_millicpu) * 100 > 300
for: 5m
labels:
issue: Longhorn instance manager {{$labels.instance_manager}} on {{$labels.node}} consumes 3 times the CPU request.
severity: warning
- alert: LonghornNodeCPUUsageWarning
annotations:
description: Longhorn node {{$labels.node}} has had CPU usage at {{$value}}% of its CPU capacity for
more than 5 minutes.
summary: Longhorn node {{$labels.node}} experiences high CPU pressure for more than 5m.
expr: (longhorn_node_cpu_usage_millicpu / longhorn_node_cpu_capacity_millicpu) * 100 > 90
for: 5m
labels:
issue: Longhorn node {{$labels.node}} experiences high CPU pressure.
severity: warning

View File

@@ -0,0 +1,39 @@
kind: Secret
apiVersion: v1
metadata:
name: minio-truenas-credentials
namespace: longhorn-system
data:
AWS_ACCESS_KEY_ID: ENC[AES256_GCM,data:uE5CV9wcWg8=,iv:l41hwC+43JWRbcsqpRwukwkpHcWjMmGf9eNtR8kV0VM=,tag:TrHP2GlnSbqWE7TS9neGfw==,type:str]
AWS_SECRET_ACCESS_KEY: ENC[AES256_GCM,data:Jhg/KgZzOmU8jB3K0pMuke8BuUIWRVoQ1US3cw==,iv:lRidTSpintFfwd4/W32FGHEMy/v06ILrN62nPoMB3ew=,tag:NYT3ST+lsp6QkvjTEeXHBw==,type:str]
AWS_ENDPOINTS: ENC[AES256_GCM,data:SdIM5UQmzsibf6lD0UN/2ztF03WeM5GqoEi71HtaNKeDRNqCXAssFhUd0l0=,iv:Ep5Xdpu48QriwOA1qmBPaNpcbiudNkpH+I2YiFpYCFY=,tag:4oJYhEMyUsIG8OJ+73wf1g==,type:str]
#ENC[AES256_GCM,data:/pUAj7tHPkqci0vh/I5x5M6LebjodkftjOsXFCpQyW2D,iv:qTDtrQVblNVeUfAtBoUgO0rbqGzf4jQbjna0OQZdUf0=,tag:XZc9TX3zGZGbNz3CyYmKLw==,type:comment]
type: Opaque
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
lastmodified: "2021-04-14T14:49:06Z"
mac: ENC[AES256_GCM,data:CdXYSx72+JQMw4ZuCma8u0VTM5wNYNC0L2iBSBuLA0nr8YzMh59CAjc2S3ITpnusFQ3onisurrDoKj25GRJu0Dns4d1oluKGdsiIc8nfwSsRxxfRKb+iPa0B0lGsI2XvuvqBYcWLZ0S988NXfi8VCyaXIdoFMFjOPel9+KqPSio=,iv:8aq1YspzEiXqOIPHzZhAs930uwomdtKQtdKxSHjb90Y=,tag:sHikRF8/3+VDnVKtWEtcSA==,type:str]
pgp:
- created_at: "2021-04-14T14:49:06Z"
enc: |
-----BEGIN PGP MESSAGE-----
hQGMA/JorPHm1g9XAQv/RCNYZMMGchIhqCt7S0jCFaGTqWvtydckIGQLZN3CCwmo
xfMoaGf43yMKER21ilP3CY/EXQNzwz2di5M0/biofkaH5yiohcufECS6+rB9J/wI
Ub5RsMuNdnZSNzsNTd/T3PgUbhuqNOiOBv3BM59SfbMa3z1w3StFdWk0h4zXfezc
Vj/wtpV+1SonfCZ0QWqRB/crnAYSASoINS8kqU3I53VkoDM6pWoX4mjA7V+5x3aL
5ZdqvUte42ANqNG9SLnnLQzhjKxEnb1K3R1VB2qmvCmWB3aY8hq9zKuK/x6WH9B4
rtBiIB3BCtJeUC0rGRvBNlfxPDdegDWqae7y6JdQWRB4QaoYxVzKPNS0Msz7zjlH
Rf75ZWWUJnKmHKzAQBHrgegUiR4GipEe5v63m0kInM3J8MHtolkJ22kCXeancYWl
XnnZwWmyVz46BTR71EvdbApSmlDQjRCK3x/5FodtCZeWP1QEfC0lwRAlk2lyrPx7
/L8KnFLK+NF9uR2Xylzf0l4BD+mNEAfIq7hvy4Gh8Ek50gpAmNGLq6zRNj0Sh6dz
zbVyYHYIwEXCnvaN8UNumSqvTQ9e322bRXsYwVLLQXT58ZX/jbzvSwUkNalTJamx
X6t5Qj8/5XOjupH0IoR0
=8fGE
-----END PGP MESSAGE-----
fp: C8F8A49D04A1AB639F8EA21CDBA4B1DCB1FA5BDD
encrypted_regex: ^(data|stringData)$
version: 3.6.1

View File

@@ -0,0 +1,17 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: longhorn-backups
provisioner: driver.longhorn.io
allowVolumeExpansion: true
reclaimPolicy: Retain
parameters:
numberOfReplicas: "3"
staleReplicaTimeout: "2880"
fromBackup: ""
diskSelector: "ssd,fast"
nodeSelector: "storage,fast"
recurringJobs: '[{"name":"backup", "task":"backup", "cron":"30 23 * * *", "retain":1, "labels": {"interval":"daily"}}]'

View File

@@ -0,0 +1,79 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: node-feature-discovery
namespace: kube-system
spec:
interval: 5m
chart:
spec:
# renovate: registryUrl=https://kubernetes-sigs.github.io/node-feature-discovery/charts
chart: node-feature-discovery
version: 0.8.1
sourceRef:
kind: HelmRepository
name: node-feature-discovery-charts
namespace: flux-system
interval: 5m
values:
master:
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: "node-role.kubernetes.io/master"
operator: In
values: [""]
worker:
config: |-
core:
sources:
- custom
- pci
- usb
sources:
usb:
deviceClassWhitelist:
- "02"
- "03"
- "0e"
- "ef"
- "fe"
- "ff"
deviceLabelFields:
- "class"
- "vendor"
- "device"
custom:
- name: "zwave"
matchOn:
- usbId:
class: ["02"]
vendor: ["0658"]
device: ["0200"]
- name: "zigbee"
matchOn:
- usbId:
class: ["ff"]
vendor: ["1a86"]
device: ["7523"]
- name: "intel-gpu"
matchOn:
- pciId:
class: ["0300"]
vendor: ["8086"]
annotations:
configmap.reloader.stakater.com/reload: "nfd-worker-conf"
tolerations:
- effect: "NoExecute"
operator: "Exists"
- effect: "NoSchedule"
operator: "Exists"

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helm-release.yaml

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- system-upgrade-controller.yaml

View File

@@ -0,0 +1,98 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: system-upgrade
namespace: system-upgrade
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system-upgrade
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: system-upgrade
namespace: system-upgrade
---
apiVersion: v1
kind: ConfigMap
metadata:
name: default-controller-env
namespace: system-upgrade
data:
SYSTEM_UPGRADE_CONTROLLER_DEBUG: "false"
SYSTEM_UPGRADE_CONTROLLER_THREADS: "2"
SYSTEM_UPGRADE_JOB_ACTIVE_DEADLINE_SECONDS: "900"
SYSTEM_UPGRADE_JOB_BACKOFF_LIMIT: "99"
SYSTEM_UPGRADE_JOB_IMAGE_PULL_POLICY: "Always"
SYSTEM_UPGRADE_JOB_KUBECTL_IMAGE: "rancher/kubectl:v1.19.7"
SYSTEM_UPGRADE_JOB_PRIVILEGED: "true"
SYSTEM_UPGRADE_JOB_TTL_SECONDS_AFTER_FINISH: "900"
SYSTEM_UPGRADE_PLAN_POLLING_INTERVAL: "15m"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: system-upgrade-controller
namespace: system-upgrade
spec:
selector:
matchLabels:
upgrade.cattle.io/controller: system-upgrade-controller
template:
metadata:
labels:
upgrade.cattle.io/controller: system-upgrade-controller
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "node-role.kubernetes.io/master"
operator: In
values:
- "true"
serviceAccountName: system-upgrade
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- key: "node-role.kubernetes.io/master"
operator: "Exists"
effect: "NoSchedule"
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
containers:
- name: system-upgrade-controller
image: rancher/system-upgrade-controller:v0.6.2
imagePullPolicy: IfNotPresent
envFrom:
- configMapRef:
name: default-controller-env
env:
- name: SYSTEM_UPGRADE_CONTROLLER_NAME
valueFrom:
fieldRef:
fieldPath: metadata.labels['upgrade.cattle.io/controller']
- name: SYSTEM_UPGRADE_CONTROLLER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: etc-ssl
mountPath: /etc/ssl
- name: tmp
mountPath: /tmp
volumes:
- name: etc-ssl
hostPath:
path: /etc/ssl
type: Directory
- name: tmp
emptyDir: {}
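The controller is only the machinery; it acts on Plan resources, none of which are part of this commit. A minimal sketch of a k3s server-node Plan it could reconcile, following the upstream system-upgrade-controller examples (the plan name and release channel are assumptions):

---
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: k3s-server   # placeholder
  namespace: system-upgrade
spec:
  concurrency: 1
  cordon: true
  serviceAccountName: system-upgrade
  nodeSelector:
    matchExpressions:
      - key: node-role.kubernetes.io/master
        operator: In
        values: ["true"]
  upgrade:
    image: rancher/k3s-upgrade
  channel: https://update.k3s.io/v1-release/channels/stable   # assumed channel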

View File

@@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- crds
- infrastructure
- namespaces
- operators

View File

@@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: cert-manager

View File

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: data
labels:
goldilocks.fairwinds.com/enabled: "true"

View File

@@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: default

View File

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: development
labels:
goldilocks.fairwinds.com/enabled: "true"

View File

@@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: flux-system

View File

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: home
labels:
goldilocks.fairwinds.com/enabled: "true"

View File

@@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system

View File

@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- cert-manager.yaml
- data.yaml
- default.yaml
- development.yaml
- flux-system.yaml
- home.yaml
- kube-system.yaml
- longhorn-system.yaml
- media.yaml
- monitoring.yaml
- networking.yaml
- system-upgrade.yaml

View File

@@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: longhorn-system

View File

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: media
labels:
goldilocks.fairwinds.com/enabled: "true"

View File

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: monitoring
labels:
goldilocks.fairwinds.com/enabled: "true"

View File

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: networking
labels:
goldilocks.fairwinds.com/enabled: "true"

View File

@@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: system-upgrade

View File

@@ -0,0 +1,19 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cert-manager-webhook-ovh
namespace: cert-manager
spec:
interval: 5m
chart:
spec:
chart: ./deploy/cert-manager-webhook-ovh
version: 0.2.0
sourceRef:
kind: GitRepository
name: cert-manager-webhook-ovh
namespace: flux-system
interval: 1440m
values:
groupName: "${SECRET_CLUSTER_DOMAIN_ROOT}"

View File

@@ -0,0 +1,32 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cert-manager
namespace: cert-manager
spec:
interval: 5m
chart:
spec:
# renovate: registryUrl=https://charts.jetstack.io/
chart: cert-manager
version: v1.3.0
sourceRef:
kind: HelmRepository
name: jetstack-charts
namespace: flux-system
interval: 5m
values:
installCRDs: false
webhook:
enabled: true
extraArgs:
- --dns01-recursive-nameservers=ns15.ovh.net:53,dns15.ovh.net:53
- --dns01-recursive-nameservers-only
cainjector:
replicaCount: 1
prometheus:
enabled: true
servicemonitor:
enabled: true
prometheusInstance: monitoring
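The sourceRef above points at a jetstack-charts HelmRepository that is not part of this diff, so it is presumably defined with the other Flux sources. A minimal sketch of what it would look like (the interval is an assumption):

---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
  name: jetstack-charts
  namespace: flux-system
spec:
  interval: 30m   # assumed refresh interval
  url: https://charts.jetstack.io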

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- cert-manager-webhook-ovh-helm-release.yaml
- cert-manager.yaml