🔧 use raw chart for kyverno

auricom
2022-10-20 05:22:32 +02:00
parent 70d30f205e
commit 51dd250bb7
13 changed files with 340 additions and 348 deletions

View File

@@ -22,12 +22,8 @@ spec:
     remediation:
       retries: 5
   values:
-    installCRDs: false
-    replicaCount: 1
-    extraArgs:
-      - --autogenInternals=false
-      - --clientRateLimitQPS=30
-      - --clientRateLimitBurst=60
+    installCRDs: true
+    replicaCount: 3
     serviceMonitor:
       enabled: true
     topologySpreadConstraints:

View File

@@ -2,8 +2,5 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
-  # renovate: registryUrl=https://kyverno.github.io/kyverno/ chart=kyverno
-  - github.com/kyverno/kyverno//config/crds?ref=kyverno-chart-2.6.0
   - helm-release.yaml
-  - rbac.yaml
   - policies

View File

@@ -1,36 +0,0 @@
----
-apiVersion: kyverno.io/v1
-kind: ClusterPolicy
-metadata:
-  name: apply-ingress-auth-annotations
-  annotations:
-    policies.kyverno.io/title: Apply Ingress Auth Annotations
-    policies.kyverno.io/subject: Ingress
-    policies.kyverno.io/description: >-
-      This policy creates auth annotations on ingresses. When
-      the `auth.home.arpa/enabled` annotation is `true` it
-      applies the nginx auth annotations for use with Authelia.
-spec:
-  mutateExistingOnPolicyUpdate: true
-  generateExistingOnPolicyUpdate: true
-  rules:
-    - name: auth
-      match:
-        any:
-          - resources:
-              kinds: ["Ingress"]
-              annotations:
-                auth.home.arpa/enabled: "true"
-      mutate:
-        patchStrategicMerge:
-          metadata:
-            annotations:
-              +(nginx.ingress.kubernetes.io/auth-method): GET
-              +(nginx.ingress.kubernetes.io/auth-url): |-
-                http://authelia.default.svc.cluster.local./api/verify
-              +(nginx.ingress.kubernetes.io/auth-signin): |-
-                https://auth.${SECRET_CLUSTER_DOMAIN}?rm=$request_method
-              +(nginx.ingress.kubernetes.io/auth-response-headers): |-
-                Remote-User,Remote-Name,Remote-Groups,Remote-Email
-              +(nginx.ingress.kubernetes.io/auth-snippet): |
-                proxy_set_header X-Forwarded-Method $request_method;
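
For illustration only (not part of the commit): a hypothetical Ingress that opts in to this policy. The name, host, and backend are assumptions.

---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-app                     # hypothetical
  annotations:
    auth.home.arpa/enabled: "true"      # triggers the auth rule above
spec:
  rules:
    - host: example-app.example.com     # hypothetical host
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: example-app
                port:
                  number: 80

Because the policy uses `+(...)` add-if-not-present anchors, Kyverno fills in the nginx auth annotations only when they are missing, so per-Ingress overrides are preserved.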

View File

@@ -1,30 +0,0 @@
----
-apiVersion: kyverno.io/v1
-kind: ClusterPolicy
-metadata:
-  name: apply-ingress-external-dns-annotations
-  annotations:
-    policies.kyverno.io/title: Apply Ingress External-DNS Annotations
-    policies.kyverno.io/subject: Ingress
-    policies.kyverno.io/description: >-
-      This policy creates external-dns annotations on ingresses.
-      When the `external-dns.home.arpa/enabled` annotation is `true`
-      it applies the external-dns annotations for use with external
-      application access.
-spec:
-  mutateExistingOnPolicyUpdate: true
-  generateExistingOnPolicyUpdate: true
-  rules:
-    - name: external-dns
-      match:
-        any:
-          - resources:
-              kinds: ["Ingress"]
-              annotations:
-                external-dns.home.arpa/enabled: "true"
-      mutate:
-        patchStrategicMerge:
-          metadata:
-            annotations:
-              +(external-dns.alpha.kubernetes.io/target): |-
-                services.${SECRET_DOMAIN}.

View File

@@ -1,33 +0,0 @@
----
-apiVersion: kyverno.io/v1
-kind: ClusterPolicy
-metadata:
-  name: apply-ingress-whitelist-annotations
-  annotations:
-    policies.kyverno.io/title: Apply Ingress Whitelist Annotations
-    policies.kyverno.io/subject: Ingress
-    policies.kyverno.io/description: >-
-      This policy creates annotations on ingresses. When
-      the `external-dns.home.arpa/enabled` annotation is not
-      set it applies the nginx annotations for use with only
-      internal application access.
-spec:
-  mutateExistingOnPolicyUpdate: true
-  generateExistingOnPolicyUpdate: true
-  rules:
-    - name: whitelist
-      match:
-        any:
-          - resources:
-              kinds: ["Ingress"]
-      exclude:
-        any:
-          - resources:
-              annotations:
-                external-dns.home.arpa/enabled: "true"
-      mutate:
-        patchStrategicMerge:
-          metadata:
-            annotations:
-              +(nginx.ingress.kubernetes.io/whitelist-source-range): |-
-                10.0.0.0/8,172.16.0.0/12,192.168.0.0/16

View File

@@ -1,51 +0,0 @@
----
-apiVersion: kyverno.io/v1
-kind: ClusterPolicy
-metadata:
-  name: delete-cpu-limits
-  annotations:
-    policies.kyverno.io/title: Delete CPU limits
-    policies.kyverno.io/subject: Pod
-    policies.kyverno.io/description: >-
-      This policy deletes CPU limits from all Pods.
-spec:
-  mutateExistingOnPolicyUpdate: true
-  generateExistingOnPolicyUpdate: true
-  rules:
-    - name: delete-cpu-limits
-      match:
-        any:
-          - resources:
-              kinds: ["Pod"]
-      exclude:
-        any:
-          - resources:
-              namespaces:
-                - calico-system
-                - tigera-operator
-          - resources:
-              kinds: ["Pod"]
-              selector:
-                matchLabels:
-                  job-name: "*"
-          - resources:
-              kinds: ["Pod"]
-              selector:
-                matchLabels:
-                  statefulset.kubernetes.io/pod-name: "*"
-          - resources:
-              annotations:
-                kyverno.io/ignore: "true"
-      mutate:
-        patchStrategicMerge:
-          spec:
-            initContainers:
-              - (name): "*"
-                resources:
-                  limits:
-                    cpu: null
-            containers:
-              - (name): "*"
-                resources:
-                  limits:
-                    cpu: null
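
A sketch of the mutation's effect on a hypothetical container (values illustrative): `cpu: null` in a strategic-merge patch deletes just that key, leaving requests and the memory limit intact.

# Before admission:
resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 500m
    memory: 256Mi
# After the delete-cpu-limits mutation:
resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    memory: 256Mi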

View File

@@ -0,0 +1,327 @@
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+  name: kyverno-policies
+  namespace: kyverno
+spec:
+  interval: 15m
+  chart:
+    spec:
+      chart: raw
+      version: v0.3.1
+      sourceRef:
+        kind: HelmRepository
+        name: dysnix-charts
+        namespace: flux-system
+  install:
+    createNamespace: true
+    remediation:
+      retries: 5
+  upgrade:
+    remediation:
+      retries: 5
+  dependsOn:
+    - name: kyverno
+      namespace: kyverno
+  values:
+    resources:
+      - apiVersion: rbac.authorization.k8s.io/v1
+        kind: ClusterRoleBinding
+        metadata:
+          name: kyverno:admin
+        roleRef:
+          apiGroup: rbac.authorization.k8s.io
+          kind: ClusterRole
+          name: admin
+        subjects:
+          - kind: ServiceAccount
+            name: kyverno
+            namespace: kyverno
+      - apiVersion: kyverno.io/v1
+        kind: ClusterPolicy
+        metadata:
+          name: apply-ingress-auth-annotations
+          annotations:
+            policies.kyverno.io/title: Apply Ingress Auth Annotations
+            policies.kyverno.io/subject: Ingress
+            policies.kyverno.io/description: >-
+              This policy creates auth annotations on ingresses. When
+              the `auth.home.arpa/enabled` annotation is `true` it
+              applies the nginx auth annotations for use with Authelia.
+        spec:
+          mutateExistingOnPolicyUpdate: true
+          generateExistingOnPolicyUpdate: true
+          rules:
+            - name: auth
+              match:
+                any:
+                  - resources:
+                      kinds: ["Ingress"]
+                      annotations:
+                        auth.home.arpa/enabled: "true"
+              mutate:
+                patchStrategicMerge:
+                  metadata:
+                    annotations:
+                      +(nginx.ingress.kubernetes.io/auth-method): GET
+                      +(nginx.ingress.kubernetes.io/auth-url): |-
+                        http://authelia.default.svc.cluster.local./api/verify
+                      +(nginx.ingress.kubernetes.io/auth-signin): |-
+                        https://auth.${SECRET_CLUSTER_DOMAIN}?rm=$request_method
+                      +(nginx.ingress.kubernetes.io/auth-response-headers): |-
+                        Remote-User,Remote-Name,Remote-Groups,Remote-Email
+                      +(nginx.ingress.kubernetes.io/auth-snippet): |
+                        proxy_set_header X-Forwarded-Method $request_method;
+      - apiVersion: kyverno.io/v1
+        kind: ClusterPolicy
+        metadata:
+          name: apply-ingress-external-dns-annotations
+          annotations:
+            policies.kyverno.io/title: Apply Ingress External-DNS Annotations
+            policies.kyverno.io/subject: Ingress
+            policies.kyverno.io/description: >-
+              This policy creates external-dns annotations on ingresses.
+              When the `external-dns.home.arpa/enabled` annotation is `true`
+              it applies the external-dns annotations for use with external
+              application access.
+        spec:
+          mutateExistingOnPolicyUpdate: true
+          generateExistingOnPolicyUpdate: true
+          rules:
+            - name: external-dns
+              match:
+                any:
+                  - resources:
+                      kinds: ["Ingress"]
+                      annotations:
+                        external-dns.home.arpa/enabled: "true"
+              mutate:
+                patchStrategicMerge:
+                  metadata:
+                    annotations:
+                      +(external-dns.alpha.kubernetes.io/target): |-
+                        services.${SECRET_DOMAIN}.
+      - apiVersion: kyverno.io/v1
+        kind: ClusterPolicy
+        metadata:
+          name: apply-ingress-whitelist-annotations
+          annotations:
+            policies.kyverno.io/title: Apply Ingress Whitelist Annotations
+            policies.kyverno.io/subject: Ingress
+            policies.kyverno.io/description: >-
+              This policy creates annotations on ingresses. When
+              the `external-dns.home.arpa/enabled` annotation is not
+              set it applies the nginx annotations for use with only
+              internal application access.
+        spec:
+          mutateExistingOnPolicyUpdate: true
+          generateExistingOnPolicyUpdate: true
+          rules:
+            - name: whitelist
+              match:
+                any:
+                  - resources:
+                      kinds: ["Ingress"]
+              exclude:
+                any:
+                  - resources:
+                      annotations:
+                        external-dns.home.arpa/enabled: "true"
+              mutate:
+                patchStrategicMerge:
+                  metadata:
+                    annotations:
+                      +(nginx.ingress.kubernetes.io/whitelist-source-range): |-
+                        10.0.0.0/8,172.16.0.0/12,192.168.0.0/16
+      - apiVersion: kyverno.io/v1
+        kind: ClusterPolicy
+        metadata:
+          name: delete-cpu-limits
+          annotations:
+            policies.kyverno.io/title: Delete CPU limits
+            policies.kyverno.io/subject: Pod
+            policies.kyverno.io/description: >-
+              This policy deletes CPU limits from all Pods.
+        spec:
+          mutateExistingOnPolicyUpdate: true
+          generateExistingOnPolicyUpdate: true
+          rules:
+            - name: delete-cpu-limits
+              match:
+                any:
+                  - resources:
+                      kinds: ["Pod"]
+              exclude:
+                any:
+                  - resources:
+                      namespaces:
+                        - calico-system
+                        - tigera-operator
+                  - resources:
+                      kinds: ["Pod"]
+                      selector:
+                        matchLabels:
+                          job-name: "*"
+                  - resources:
+                      kinds: ["Pod"]
+                      selector:
+                        matchLabels:
+                          statefulset.kubernetes.io/pod-name: "*"
+                  - resources:
+                      annotations:
+                        kyverno.io/ignore: "true"
+              mutate:
+                patchStrategicMerge:
+                  spec:
+                    initContainers:
+                      - (name): "*"
+                        resources:
+                          limits:
+                            cpu: null
+                    containers:
+                      - (name): "*"
+                        resources:
+                          limits:
+                            cpu: null
+      - apiVersion: kyverno.io/v1
+        kind: ClusterPolicy
+        metadata:
+          name: snapshot-cronjob-controller
+          annotations:
+            policies.kyverno.io/title: Snapshot CronJob controller
+            policies.kyverno.io/subject: PersistentVolumeClaim
+            policies.kyverno.io/description: |
+              This policy creates a Kopia snapshot CronJob for labeled PersistentVolumeClaims.
+              The following labels are required on PVCs for this policy to run:
+                - snapshot.home.arpa/enabled
+                - app.kubernetes.io/name
+                - app.kubernetes.io/instance
+              An optional label of "snapshot.home.arpa/ignoreAffinity" may be set on the PVC
+              if the pod is guaranteed not to run during this job's execution.
+        spec:
+          generateExistingOnPolicyUpdate: true
+          mutateExistingOnPolicyUpdate: true
+          rules:
+            - name: create-snapshot-cronjob
+              match:
+                any:
+                  - resources:
+                      kinds:
+                        - PersistentVolumeClaim
+                      selector:
+                        matchLabels:
+                          snapshot.home.arpa/enabled: "true"
+                          app.kubernetes.io/name: "*"
+                          app.kubernetes.io/instance: "*"
+              context:
+                - name: appName
+                  variable:
+                    jmesPath: 'request.object.metadata.labels."app.kubernetes.io/name"'
+                - name: claimName
+                  variable:
+                    jmesPath: "request.object.metadata.name"
+                - name: namespace
+                  variable:
+                    jmesPath: "request.object.metadata.namespace"
+                - name: nodeAffinity
+                  variable:
+                    value:
+                      ignored: '{{ (request.object.metadata.labels."snapshot.home.arpa/ignoreAffinity" || ''false'') == ''false'' }}'
+                      labels:
+                        - key: app.kubernetes.io/name
+                          operator: "In"
+                          values:
+                            - '{{ request.object.metadata.labels."app.kubernetes.io/name" }}'
+                        - key: app.kubernetes.io/instance
+                          operator: "In"
+                          values:
+                            - '{{ request.object.metadata.labels."app.kubernetes.io/instance" }}'
+              generate:
+                synchronize: true
+                apiVersion: batch/v1
+                kind: CronJob
+                name: "{{ appName }}-{{ claimName }}-snapshot"
+                namespace: "{{ request.object.metadata.namespace }}"
+                data:
+                  metadata:
+                    labels:
+                      app.kubernetes.io/name: '{{ request.object.metadata.labels."app.kubernetes.io/name" }}'
+                      app.kubernetes.io/instance: '{{ request.object.metadata.labels."app.kubernetes.io/instance" }}'
+                    ownerReferences:
+                      - apiVersion: "{{ request.object.apiVersion }}"
+                        kind: "{{ request.object.kind }}"
+                        name: "{{ request.object.metadata.name }}"
+                        uid: "{{ request.object.metadata.uid }}"
+                  spec:
+                    schedule: "0 3 * * *"
+                    suspend: false
+                    concurrencyPolicy: Forbid
+                    successfulJobsHistoryLimit: 1
+                    failedJobsHistoryLimit: 2
+                    jobTemplate:
+                      spec:
+                        # Keep at least one job in the completed state, in accordance with the schedule
+                        ttlSecondsAfterFinished: 86400
+                        template:
+                          spec:
+                            automountServiceAccountToken: false
+                            restartPolicy: OnFailure
+                            # Stagger jobs to run randomly within X seconds to avoid bringing down all apps at once
+                            initContainers:
+                              - name: wait
+                                image: ghcr.io/onedr0p/kopia:0.12.1@sha256:e333295b519ce586e7c050c970b2255d87bdb2979298ff87ebdb1113e381ba3b
+                                command: ["/scripts/sleep.sh"]
+                                args: ["1", "900"]
+                            containers:
+                              - name: snapshot
+                                image: ghcr.io/onedr0p/kopia:0.12.1@sha256:e333295b519ce586e7c050c970b2255d87bdb2979298ff87ebdb1113e381ba3b
+                                env:
+                                  - name: KOPIA_CACHE_DIRECTORY
+                                    value: /snapshots/{{ namespace }}/{{ appName }}/{{ claimName }}/cache
+                                  - name: KOPIA_LOG_DIR
+                                    value: /snapshots/{{ namespace }}/{{ appName }}/{{ claimName }}/logs
+                                  - name: KOPIA_PASSWORD
+                                    value: "none"
+                                command:
+                                  - /bin/bash
+                                  - -c
+                                  - |-
+                                    printf "\e[1;32m%-6s\e[m\n" "[01/10] Create repo ..." && [[ ! -f /snapshots/kopia.repository.f ]] && kopia repository create filesystem --path=/snapshots
+                                    printf "\e[1;32m%-6s\e[m\n" "[02/10] Connect to repo ..." && kopia repo connect filesystem --path=/snapshots --override-hostname=cluster --override-username=root
+                                    printf "\e[1;32m%-6s\e[m\n" "[03/10] Set policies ..." && kopia policy set /data/{{ namespace }}/{{ appName }}/{{ claimName }} --compression=zstd --keep-latest 14 --keep-hourly 0 --keep-daily 7 --keep-weekly 2 --keep-monthly 0 --keep-annual 0
+                                    printf "\e[1;32m%-6s\e[m\n" "[04/10] Freeze {{ claimName }} ..." && fsfreeze -f /data/{{ namespace }}/{{ appName }}/{{ claimName }}
+                                    printf "\e[1;32m%-6s\e[m\n" "[05/10] Snapshot {{ claimName }} ..." && kopia snap create /data/{{ namespace }}/{{ appName }}/{{ claimName }}
+                                    printf "\e[1;32m%-6s\e[m\n" "[06/10] Unfreeze {{ claimName }} ..." && fsfreeze -u /data/{{ namespace }}/{{ appName }}/{{ claimName }}
+                                    printf "\e[1;32m%-6s\e[m\n" "[07/10] List snapshots ..." && kopia snap list /data/{{ namespace }}/{{ appName }}/{{ claimName }}
+                                    printf "\e[1;32m%-6s\e[m\n" "[08/10] Show stats ..." && kopia content stats
+                                    printf "\e[1;32m%-6s\e[m\n" "[09/10] Show maintenance info ..." && kopia maintenance info
+                                    printf "\e[1;32m%-6s\e[m\n" "[10/10] Disconnect from repo ..." && kopia repo disconnect
+                                volumeMounts:
+                                  - name: data
+                                    mountPath: "/data/{{ namespace }}/{{ appName }}/{{ claimName }}"
+                                  - name: snapshots
+                                    mountPath: /snapshots
+                                securityContext:
+                                  privileged: true
+                            volumes:
+                              - name: data
+                                persistentVolumeClaim:
+                                  claimName: "{{ claimName }}"
+                              - name: snapshots
+                                nfs:
+                                  server: "expanse.${SECRET_PRIVATE_DOMAIN}"
+                                  path: /eros/Apps/Kopia
+                            affinity:
+                              podAffinity:
+                                requiredDuringSchedulingIgnoredDuringExecution:
+                                  - topologyKey: kubernetes.io/hostname
+                                    labelSelector:
+                                      matchExpressions: "{{ nodeAffinity.ignored && [] || nodeAffinity.labels }}"
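
The dysnix `raw` chart renders whatever is placed under `values.resources` as plain manifests, which is what lets the policies above ship through a single HelmRelease. The `sourceRef` assumes a HelmRepository along these lines already exists in the cluster (a sketch; the API version and URL are assumptions based on the Flux and dysnix defaults of the time, not part of this commit):

---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
  name: dysnix-charts        # name referenced by the HelmRelease sourceRef
  namespace: flux-system
spec:
  interval: 1h
  url: https://dysnix.github.io/charts   # assumed repository URL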

View File

@@ -2,9 +2,4 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
-  - apply-ingress-auth-annotations.yaml
-  - apply-ingress-external-dns-annotations.yaml
-  - apply-ingress-whitelist-annotations.yaml
-  - delete-cpu-limits.yaml
-  - snapshot-cronjob-controller.yaml
-  - sync-postgres-secrets.yaml
+  - helm-release.yaml

View File

@@ -1,137 +0,0 @@
----
-apiVersion: kyverno.io/v1
-kind: ClusterPolicy
-metadata:
-  name: snapshot-cronjob-controller
-  annotations:
-    policies.kyverno.io/title: Snapshot CronJob controller
-    policies.kyverno.io/subject: PersistentVolumeClaim
-    policies.kyverno.io/description: |
-      This policy creates a Kopia snapshot CronJob for labeled PersistentVolumeClaims.
-      The following labels are required on PVCs for this policy to run:
-        - snapshot.home.arpa/enabled
-        - app.kubernetes.io/name
-        - app.kubernetes.io/instance
-      An optional label of "snapshot.home.arpa/ignoreAffinity" may be set on the PVC
-      if the pod is guaranteed not to run during this job's execution.
-spec:
-  generateExistingOnPolicyUpdate: true
-  mutateExistingOnPolicyUpdate: true
-  rules:
-    - name: create-snapshot-cronjob
-      match:
-        any:
-          - resources:
-              kinds:
-                - PersistentVolumeClaim
-              selector:
-                matchLabels:
-                  snapshot.home.arpa/enabled: "true"
-                  app.kubernetes.io/name: "*"
-                  app.kubernetes.io/instance: "*"
-      context:
-        - name: appName
-          variable:
-            jmesPath: "request.object.metadata.labels.\"app.kubernetes.io/name\""
-        - name: claimName
-          variable:
-            jmesPath: "request.object.metadata.name"
-        - name: namespace
-          variable:
-            jmesPath: "request.object.metadata.namespace"
-        - name: nodeAffinity
-          variable:
-            value:
-              ignored: "{{ (request.object.metadata.labels.\"snapshot.home.arpa/ignoreAffinity\" || 'false') == 'false' }}"
-              labels:
-                - key: app.kubernetes.io/name
-                  operator: "In"
-                  values:
-                    - "{{ request.object.metadata.labels.\"app.kubernetes.io/name\" }}"
-                - key: app.kubernetes.io/instance
-                  operator: "In"
-                  values:
-                    - "{{ request.object.metadata.labels.\"app.kubernetes.io/instance\" }}"
-      generate:
-        synchronize: true
-        apiVersion: batch/v1
-        kind: CronJob
-        name: "{{ appName }}-{{ claimName }}-snapshot"
-        namespace: "{{ request.object.metadata.namespace }}"
-        data:
-          metadata:
-            labels:
-              app.kubernetes.io/name: "{{ request.object.metadata.labels.\"app.kubernetes.io/name\" }}"
-              app.kubernetes.io/instance: "{{ request.object.metadata.labels.\"app.kubernetes.io/instance\" }}"
-            ownerReferences:
-              - apiVersion: "{{ request.object.apiVersion }}"
-                kind: "{{ request.object.kind }}"
-                name: "{{ request.object.metadata.name }}"
-                uid: "{{ request.object.metadata.uid }}"
-          spec:
-            schedule: "0 3 * * *"
-            suspend: false
-            concurrencyPolicy: Forbid
-            successfulJobsHistoryLimit: 1
-            failedJobsHistoryLimit: 2
-            jobTemplate:
-              spec:
-                # Keep at least one job in the completed state, in accordance with the schedule
-                ttlSecondsAfterFinished: 86400
-                template:
-                  spec:
-                    automountServiceAccountToken: false
-                    restartPolicy: OnFailure
-                    # Stagger jobs to run randomly within X seconds to avoid bringing down all apps at once
-                    initContainers:
-                      - name: wait
-                        image: ghcr.io/onedr0p/kopia:0.12.1@sha256:e333295b519ce586e7c050c970b2255d87bdb2979298ff87ebdb1113e381ba3b
-                        command: ["/scripts/sleep.sh"]
-                        args: ["1", "900"]
-                    containers:
-                      - name: snapshot
-                        image: ghcr.io/onedr0p/kopia:0.12.1@sha256:e333295b519ce586e7c050c970b2255d87bdb2979298ff87ebdb1113e381ba3b
-                        env:
-                          - name: KOPIA_CACHE_DIRECTORY
-                            value: /snapshots/{{ namespace }}/{{ appName }}/{{ claimName }}/cache
-                          - name: KOPIA_LOG_DIR
-                            value: /snapshots/{{ namespace }}/{{ appName }}/{{ claimName }}/logs
-                          - name: KOPIA_PASSWORD
-                            value: "none"
-                        command:
-                          - /bin/bash
-                          - -c
-                          - |-
-                            printf "\e[1;32m%-6s\e[m\n" "[01/10] Create repo ..." && [[ ! -f /snapshots/kopia.repository.f ]] && kopia repository create filesystem --path=/snapshots
-                            printf "\e[1;32m%-6s\e[m\n" "[02/10] Connect to repo ..." && kopia repo connect filesystem --path=/snapshots --override-hostname=cluster --override-username=root
-                            printf "\e[1;32m%-6s\e[m\n" "[03/10] Set policies ..." && kopia policy set /data/{{ namespace }}/{{ appName }}/{{ claimName }} --compression=zstd --keep-latest 14 --keep-hourly 0 --keep-daily 7 --keep-weekly 2 --keep-monthly 0 --keep-annual 0
-                            printf "\e[1;32m%-6s\e[m\n" "[04/10] Freeze {{ claimName }} ..." && fsfreeze -f /data/{{ namespace }}/{{ appName }}/{{ claimName }}
-                            printf "\e[1;32m%-6s\e[m\n" "[05/10] Snapshot {{ claimName }} ..." && kopia snap create /data/{{ namespace }}/{{ appName }}/{{ claimName }}
-                            printf "\e[1;32m%-6s\e[m\n" "[06/10] Unfreeze {{ claimName }} ..." && fsfreeze -u /data/{{ namespace }}/{{ appName }}/{{ claimName }}
-                            printf "\e[1;32m%-6s\e[m\n" "[07/10] List snapshots ..." && kopia snap list /data/{{ namespace }}/{{ appName }}/{{ claimName }}
-                            printf "\e[1;32m%-6s\e[m\n" "[08/10] Show stats ..." && kopia content stats
-                            printf "\e[1;32m%-6s\e[m\n" "[09/10] Show maintenance info ..." && kopia maintenance info
-                            printf "\e[1;32m%-6s\e[m\n" "[10/10] Disconnect from repo ..." && kopia repo disconnect
-                        volumeMounts:
-                          - name: data
-                            mountPath: "/data/{{ namespace }}/{{ appName }}/{{ claimName }}"
-                          - name: snapshots
-                            mountPath: /snapshots
-                        securityContext:
-                          privileged: true
-                    volumes:
-                      - name: data
-                        persistentVolumeClaim:
-                          claimName: "{{ claimName }}"
-                      - name: snapshots
-                        nfs:
-                          server: "expanse.${SECRET_PRIVATE_DOMAIN}"
-                          path: /eros/Apps/Kopia
-                    affinity:
-                      podAffinity:
-                        requiredDuringSchedulingIgnoredDuringExecution:
-                          - topologyKey: kubernetes.io/hostname
-                            labelSelector:
-                              matchExpressions: "{{ nodeAffinity.ignored && [] || nodeAffinity.labels }}"

View File

@@ -1,33 +0,0 @@
----
-apiVersion: kyverno.io/v1
-kind: ClusterPolicy
-metadata:
-  name: sync-postgres-secrets
-  annotations:
-    policies.kyverno.io/title: Sync Postgres Secrets
-    policies.kyverno.io/subject: Secret
-    policies.kyverno.io/description: >-
-      This policy will copy a secret called `postgres-superuser` which
-      exists in the `default` namespace to new namespaces when they are
-      created. It will also push updates to the copied Secrets should the
-      source secret be changed.
-spec:
-  mutateExistingOnPolicyUpdate: true
-  generateExistingOnPolicyUpdate: true
-  rules:
-    - name: sync-postgres-superuser-secret
-      match:
-        resources:
-          kinds: ["Namespace"]
-      exclude:
-        resources:
-          namespaces: ["default"]
-      generate:
-        apiVersion: v1
-        kind: Secret
-        name: postgres-superuser
-        namespace: "{{request.object.metadata.name}}"
-        synchronize: true
-        clone:
-          namespace: default
-          name: postgres-superuser
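
As a sketch of what this policy generated: creating a hypothetical namespace `media` would produce a synchronized clone of the source Secret there, kept up to date with the original in `default`.

---
apiVersion: v1
kind: Secret
metadata:
  name: postgres-superuser
  namespace: media        # hypothetical newly created namespace
# data: cloned verbatim from default/postgres-superuser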

View File

@@ -1,13 +0,0 @@
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: kyverno:admin
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: admin
-subjects:
-  - kind: ServiceAccount
-    name: kyverno
-    namespace: kyverno