♻️ flux kustomizations

auricom
2022-12-26 15:24:33 +01:00
parent b4572bf19a
commit ca31e11491
730 changed files with 6825 additions and 3766 deletions


@@ -1,3 +0,0 @@
{
"ignore": ["**/truenas/files/scripts/**"]
}


@@ -1,23 +0,0 @@
---
default: true
# MD013/line-length - Line length
MD013:
# Number of characters
line_length: 240
# Number of characters for headings
heading_line_length: 80
# Number of characters for code blocks
code_block_line_length: 300
# Include code blocks
code_blocks: true
# Include tables
tables: true
# Include headings
headings: true
  # Include headers (deprecated alias for headings)
headers: true
# Strict length checking
strict: false
# Stern length checking
stern: false


@@ -1,7 +0,0 @@
charts/
docs/
.private/
.terraform/
.vscode/
*.sops.*
gotk-components.yaml


@@ -1,7 +0,0 @@
---
trailingComma: "es5"
tabWidth: 2
semi: false
singleQuote: false
bracketSpacing: false
useTabs: false


@@ -1,23 +0,0 @@
---
ignore: |
charts/
docs/
.private/
.terraform/
.vscode/
*.sops.*
gotk-components.yaml
extends: default
rules:
truthy:
allowed-values: ["true", "false", "on"]
comments:
min-spaces-from-content: 1
line-length: disable
braces:
min-spaces-inside: 0
max-spaces-inside: 1
brackets:
min-spaces-inside: 0
max-spaces-inside: 0
indentation: disable


@@ -1,23 +1,23 @@
name: "Renovate"
on:
workflow_dispatch:
inputs:
dryRun:
description: "Dry-Run"
default: "false"
required: false
logLevel:
description: "Log-Level"
default: "debug"
required: false
schedule:
- cron: "0 * * * *"
push:
branches: ["main"]
paths:
- ".github/renovate.json5"
- ".github/renovate/**.json5"
# workflow_dispatch:
# inputs:
# dryRun:
# description: "Dry-Run"
# default: "false"
# required: false
# logLevel:
# description: "Log-Level"
# default: "debug"
# required: false
# schedule:
# - cron: "0 * * * *"
# push:
# branches: ["main"]
# paths:
# - ".github/renovate.json5"
# - ".github/renovate/**.json5"
env:
LOG_LEVEL: debug


@@ -3,13 +3,6 @@
# See https://pre-commit.com/hooks.html for more hooks
fail_fast: false
repos:
- repo: https://github.com/adrienverge/yamllint.git
rev: v1.28.0
hooks:
- id: yamllint
args:
- --config-file
- .github/linters/.yamllint.yaml
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
@@ -34,10 +27,3 @@ repos:
rev: v2.1.1
hooks:
- id: forbid-secrets
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.32.2
hooks:
- id: markdownlint
args:
- --config
- ".github/linters/.markdownlint.yaml"


@@ -8,7 +8,7 @@ creation_rules:
key_groups:
- age:
- age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
- path_regex: infrastructure/ansible/.*\.sops\.ya?ml
- path_regex: ansible/.*\.sops\.ya?ml
unencrypted_regex: ^(kind)$
key_groups:
- age:
@@ -17,4 +17,3 @@ creation_rules:
key_groups:
- age:
- age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
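With the shortened path_regex, any SOPS-managed file under ansible/ now picks up this age recipient automatically. A minimal sketch, using a hypothetical vars file path:

    sops --encrypt --in-place ansible/inventory/group_vars/all/secrets.sops.yaml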


@@ -0,0 +1,13 @@
---
version: "3"
tasks:
deps:
desc: Install/Upgrade Ansible deps
dir: '{{.ANSIBLE_DIR}}'
cmds:
- ansible-galaxy install -r requirements.yml --roles-path ~/.ansible/roles --force
- ansible-galaxy collection install -r requirements.yml --collections-path ~/.ansible/collections --force
preconditions:
- test -f "{{.ANSIBLE_DIR}}/requirements.yml"
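This task is wired up through the `an` include prefix added to the root Taskfile in this commit; a usage sketch, assuming task and ansible-galaxy are installed locally:

    task an:deps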

.taskfiles/Flux/Tasks.yml

@@ -0,0 +1,16 @@
---
version: "3"
tasks:
sync:
desc: Sync Flux resources
cmds:
- |
kubectl get gitrepositories --all-namespaces --no-headers -A | awk '{print $1, $2}' \
| xargs --max-procs=4 -l bash -c \
'kubectl -n $0 annotate gitrepositories $1 reconcile.fluxcd.io/requestedAt=$(date +%s) --field-manager=flux-client-side-apply --overwrite'
- |
kubectl get kustomization --all-namespaces --no-headers -A | awk '{print $1, $2}' \
| xargs --max-procs=4 -l bash -c \
'kubectl -n $0 annotate kustomization $1 reconcile.fluxcd.io/requestedAt="$(date +%s)" --field-manager=flux-client-side-apply --overwrite'
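The annotation bumps reconcile.fluxcd.io/requestedAt on every GitRepository and Kustomization, forcing an immediate reconcile. Via the `fx` include prefix from the root Taskfile, a usage sketch:

    task fx:sync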


@@ -0,0 +1,48 @@
---
version: "3"
tasks:
mount:
    desc: Mount a PersistentVolumeClaim to a pod temporarily
interactive: true
vars:
claim: '{{ or .claim (fail "PersistentVolumeClaim `claim` is required") }}'
namespace: '{{.namespace | default "default"}}'
cmds:
- |
kubectl run -n {{.namespace}} debug-{{.claim}} -i --tty --rm --image=null --privileged --overrides='
{
"apiVersion": "v1",
"spec": {
"containers": [
{
"name": "debug",
"image": "ghcr.io/onedr0p/alpine:rolling",
"command": [
"/bin/bash"
],
"stdin": true,
"stdinOnce": true,
"tty": true,
"volumeMounts": [
{
"name": "config",
"mountPath": "/data/config"
}
]
}
],
"volumes": [
{
"name": "config",
"persistentVolumeClaim": {
"claimName": "{{.claim}}"
}
}
],
"restartPolicy": "Never"
}
}'
preconditions:
- kubectl -n {{.namespace}} get pvc {{.claim}}
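A usage sketch via the `ku` include prefix; the claim name and namespace below are placeholders, and the precondition fails fast if the PVC does not exist:

    task ku:mount claim=config-plex namespace=default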


@@ -0,0 +1,19 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: "list-${rsrc}-${ts}"
namespace: "${namespace}"
spec:
ttlSecondsAfterFinished: 3600
template:
spec:
automountServiceAccountToken: false
restartPolicy: OnFailure
containers:
- name: list
image: docker.io/restic/restic:0.14.0
args: ["snapshots"]
envFrom:
- secretRef:
name: "${rsrc}-restic"


@@ -0,0 +1,20 @@
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationDestination
metadata:
name: "${rsrc}-${claim}-${ts}"
namespace: "${namespace}"
spec:
trigger:
manual: restore-once
restic:
repository: "${rsrc}-restic"
destinationPVC: "${claim}"
copyMethod: Direct
# IMPORTANT NOTE:
# On bootstrap set `restoreAsOf` to the time the old cluster was destroyed.
# This will essentially prevent volsync from trying to restore a backup
    # from an application that started with default data in the PVC.
# Do not restore snapshots made after the following RFC3339 Timestamp.
# date --rfc-3339=seconds (--utc)
# restoreAsOf: "2022-12-10T16:00:00-05:00"
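A sketch of producing that timestamp with GNU date; the second form prints the T-separated layout used in the example value above:

    date --rfc-3339=seconds --utc
    date --utc +"%Y-%m-%dT%H:%M:%SZ"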


@@ -0,0 +1,136 @@
---
version: "3"
x-task-vars: &task-vars
rsrc: '{{.rsrc}}'
controller: '{{.controller}}'
namespace: '{{.namespace}}'
claim: '{{.claim}}'
ts: '{{.ts}}'
kustomization: '{{.kustomization}}'
vars:
destinationTemplate: "{{.PROJECT_DIR}}/.taskfiles/VolSync/ReplicationDestination.tmpl.yaml"
wipeJobTemplate: "{{.PROJECT_DIR}}/.taskfiles/VolSync/WipeJob.tmpl.yaml"
waitForJobScript: "{{.PROJECT_DIR}}/.taskfiles/VolSync/wait-for-job.sh"
listJobTemplate: "{{.PROJECT_DIR}}/.taskfiles/VolSync/ListJob.tmpl.yaml"
ts: '{{now | date "150405"}}'
tasks:
list:
desc: List all snapshots taken by restic for a given ReplicationSource (ex. task vs:list rsrc=plex [namespace=default])
silent: true
cmds:
- envsubst < {{.listJobTemplate}} | kubectl apply -f -
- bash {{.waitForJobScript}} list-{{.rsrc}}-{{.ts}} {{.namespace}}
- kubectl -n {{.namespace}} wait job/list-{{.rsrc}}-{{.ts}} --for condition=complete --timeout=1m
- kubectl -n {{.namespace}} logs job/list-{{.rsrc}}-{{.ts}} --container list
- kubectl -n {{.namespace}} delete job list-{{.rsrc}}-{{.ts}}
vars:
rsrc: '{{ or .rsrc (fail "ReplicationSource `rsrc` is required") }}'
namespace: '{{.namespace | default "default"}}'
env: *task-vars
preconditions:
- sh: test -f {{.waitForJobScript}}
- sh: test -f {{.listJobTemplate}}
# To run backup jobs in parallel for all replicationsources:
# - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task vs:snapshot rsrc=$0 namespace=$1'
#
snapshot:
desc: Trigger a Restic ReplicationSource snapshot (ex. task vs:snapshot rsrc=plex [namespace=default])
cmds:
- kubectl -n {{.namespace}} patch replicationsources {{.rsrc}} --type merge -p '{"spec":{"trigger":{"manual":"{{.ts}}"}}}'
- bash {{.waitForJobScript}} volsync-src-{{.rsrc}} {{.namespace}}
- kubectl -n {{.namespace}} wait job/volsync-src-{{.rsrc}} --for condition=complete --timeout=120m
# TODO: Error from server (NotFound): jobs.batch "volsync-src-zzztest" not found
# - kubectl -n {{.namespace}} logs job/volsync-src-{{.rsrc}}
vars:
rsrc: '{{ or .rsrc (fail "ReplicationSource `rsrc` is required") }}'
namespace: '{{.namespace | default "default"}}'
env: *task-vars
preconditions:
- sh: test -f {{.waitForJobScript}}
- sh: kubectl -n {{.namespace}} get replicationsources {{.rsrc}}
msg: "ReplicationSource '{{.rsrc}}' not found in namespace '{{.namespace}}'"
  # To run restore jobs in parallel for all ReplicationSources:
# - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task vs:restore rsrc=$0 namespace=$1'
#
restore:
desc: Trigger a Restic ReplicationSource restore (ex. task vs:restore rsrc=plex [namespace=default])
cmds:
- task: restore-suspend-app
vars: *task-vars
- task: restore-wipe-job
vars: *task-vars
- task: restore-volsync-job
vars: *task-vars
- task: restore-resume-app
vars: *task-vars
vars:
rsrc: '{{ or .rsrc (fail "Variable `rsrc` is required") }}'
namespace: '{{.namespace | default "default"}}'
# 1) Query to find the Flux Kustomization associated with the ReplicationSource (rsrc)
kustomization:
sh: |
kubectl -n {{.namespace}} get replicationsource {{.rsrc}} \
-o jsonpath="{.metadata.labels.kustomize\.toolkit\.fluxcd\.io/name}"
# 2) Query to find the Claim associated with the ReplicationSource (rsrc)
claim:
sh: |
kubectl -n {{.namespace}} get replicationsource {{.rsrc}} \
-o jsonpath="{.spec.sourcePVC}"
# 3) Query to find the controller associated with the PersistentVolumeClaim (claim)
controller:
sh: |
app=$(kubectl -n {{.namespace}} get persistentvolumeclaim {{.claim}} -o jsonpath="{.metadata.labels.app\.kubernetes\.io/name}")
if [[ $(kubectl -n {{.namespace}} get deployment ${app}) ]]; then
echo "deployments.apps/$app" && exit 0
fi
echo "statefulsets.apps/$app"
env: *task-vars
preconditions:
- sh: test -f {{.wipeJobTemplate}}
- sh: test -f {{.destinationTemplate}}
- sh: test -f {{.waitForJobScript}}
# Suspend the Flux ks and hr
restore-suspend-app:
internal: true
cmds:
- flux -n flux-system suspend kustomization {{.kustomization}}
- flux -n {{.namespace}} suspend helmrelease {{.rsrc}}
- kubectl -n {{.namespace}} scale {{.controller}} --replicas 0
- kubectl -n {{.namespace}} wait pod --for delete --selector="app.kubernetes.io/name={{.rsrc}}" --timeout=2m
env: *task-vars
# Wipe the PVC of all data
restore-wipe-job:
internal: true
cmds:
- envsubst < <(cat {{.wipeJobTemplate}}) | kubectl apply -f -
- bash {{.waitForJobScript}} wipe-{{.rsrc}}-{{.claim}}-{{.ts}} {{.namespace}}
- kubectl -n {{.namespace}} wait job/wipe-{{.rsrc}}-{{.claim}}-{{.ts}} --for condition=complete --timeout=120m
- kubectl -n {{.namespace}} logs job/wipe-{{.rsrc}}-{{.claim}}-{{.ts}} --container wipe
- kubectl -n {{.namespace}} delete job wipe-{{.rsrc}}-{{.claim}}-{{.ts}}
env: *task-vars
# Create VolSync replicationdestination CR to restore data
restore-volsync-job:
internal: true
cmds:
- envsubst < <(cat {{.destinationTemplate}}) | kubectl apply -f -
- bash {{.waitForJobScript}} volsync-dst-{{.rsrc}}-{{.claim}}-{{.ts}} {{.namespace}}
- kubectl -n {{.namespace}} wait job/volsync-dst-{{.rsrc}}-{{.claim}}-{{.ts}} --for condition=complete --timeout=120m
- kubectl -n {{.namespace}} delete replicationdestination {{.rsrc}}-{{.claim}}-{{.ts}}
env: *task-vars
# Resume Flux ks and hr
restore-resume-app:
internal: true
cmds:
- flux -n {{.namespace}} resume helmrelease {{.rsrc}}
- flux -n flux-system resume kustomization {{.kustomization}}
env: *task-vars
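Typical invocations, matching the desc strings above (plex and the namespace are placeholders):

    task vs:list rsrc=plex
    task vs:snapshot rsrc=plex namespace=default
    task vs:restore rsrc=plex namespace=default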


@@ -0,0 +1,25 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: "wipe-${rsrc}-${claim}-${ts}"
namespace: "${namespace}"
spec:
ttlSecondsAfterFinished: 3600
template:
spec:
automountServiceAccountToken: false
restartPolicy: OnFailure
containers:
- name: wipe
image: ghcr.io/onedr0p/alpine:3.17.0@sha256:8e1eb13c3ca5c038f3bf22a5fe9e354867f97f98a78027c44b7c76fce81fa61d
command: ["/bin/bash", "-c", "cd /config; find . -delete"]
volumeMounts:
- name: config
mountPath: /config
securityContext:
privileged: true
volumes:
- name: config
persistentVolumeClaim:
claimName: "${claim}"


@@ -0,0 +1,14 @@
#!/usr/bin/env bash
# Poll until the pod created by the given Job reports the Pending phase, so the
# follow-up `kubectl wait job/...` calls in the taskfiles have a pod to watch.
JOB_NAME=$1
NAMESPACE="${2:-default}"
[[ -z "${JOB_NAME}" ]] && echo "Job name not specified" && exit 1
while true; do
STATUS="$(kubectl -n "${NAMESPACE}" get pod -l job-name="${JOB_NAME}" -o jsonpath='{.items[*].status.phase}')"
if [ "${STATUS}" == "Pending" ]; then
break
fi
sleep 1
done
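The VolSync tasks call this helper right before kubectl wait; a standalone sketch with a placeholder job name:

    bash .taskfiles/VolSync/wait-for-job.sh list-plex-150405 default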


@@ -12,8 +12,8 @@ x-preconditions: &preconditions
msg: "Claim '{{.CLAIM}}' in namespace '{{.NAMESPACE}}' not found"
sh: kubectl get pvc -n {{.NAMESPACE}} {{.CLAIM}}
- &has-restore-job-file
msg: "File '{{.PROJECT_DIR}}/kubernetes/tools/kopia-restore.yaml' not found"
sh: "test -f {{.PROJECT_DIR}}/kubernetes/tools/kopia-restore.yaml"
msg: "File '{{.PROJECT_DIR}}/tools/kopia-restore.yaml' not found"
sh: "test -f {{.PROJECT_DIR}}/tools/kopia-restore.yaml"
x-vars: &vars
NAMESPACE:


@@ -4,7 +4,18 @@ version: "3"
vars:
PROJECT_DIR:
sh: "git rev-parse --show-toplevel"
CLUSTER_DIR: "{{.PROJECT_DIR}}/cluster"
ANSIBLE_DIR: "{{.PROJECT_DIR}}/ansible"
CLUSTER_DIR: "{{.PROJECT_DIR}}/kubernetes"
includes:
an: .taskfiles/Ansible/Tasks.yml
fx: .taskfiles/Flux/Tasks.yml
kopia: .taskfiles/kopia.yaml
ku: .taskfiles/Kubernetes/Tasks.yml
vs: .taskfiles/VolSync/Tasks.yml
tasks:
default:
silent: true
cmds: ["task -l"]


@@ -8,7 +8,3 @@ collections:
version: 2.3.2
- name: community.sops
version: 1.5.0
roles:
- name: xanmanning.k3s
src: https://github.com/PyratLabs/ansible-role-k3s.git
version: v3.3.1


@@ -1,2 +0,0 @@
charts
clusterconfig


@@ -4,7 +4,7 @@ apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cert-manager
namespace: default
namespace: cert-manager
spec:
interval: 15m
chart:


@@ -0,0 +1,18 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: cert-manager
resources:
- ./helmrelease.yaml
- ./prometheusrule.yaml
configMapGenerator:
- name: cert-manager-dashboard
files:
- cert-manager-dashboard.json=https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/cert-manager/dashboards/cert-manager.json
generatorOptions:
disableNameSuffixHash: true
annotations:
kustomize.toolkit.fluxcd.io/substitute: disabled
labels:
grafana_dashboard: "true"


@@ -0,0 +1,48 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/kustomization_v1beta2.json
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
name: cluster-apps-cert-manager
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
path: ./kubernetes/apps/cert-manager/cert-manager/app
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
healthChecks:
- apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
name: cert-manager
namespace: cert-manager
interval: 30m
retryInterval: 1m
timeout: 3m
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/kustomization_v1beta2.json
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
name: cluster-apps-cert-manager-webhook-ovh
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
dependsOn:
- name: cluster-apps-cert-manager
path: ./kubernetes/apps/cert-manager/cert-manager/webhook-ovh
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
healthChecks:
- apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
name: cert-manager-webhook-ovh
namespace: cert-manager
interval: 30m
retryInterval: 1m
timeout: 3m
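Once pushed, these Flux Kustomizations can be reconciled or inspected directly with the flux CLI (a sketch; resource names as defined above):

    flux -n flux-system reconcile kustomization cluster-apps-cert-manager --with-source
    flux -n flux-system get kustomizations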


@@ -0,0 +1,58 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/helmrelease_v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cert-manager-webhook-ovh
namespace: cert-manager
spec:
interval: 15m
chart:
spec:
chart: cert-manager-webhook-ovh
version: v0.4.0
sourceRef:
kind: HelmRepository
name: cert-manager-webhook-ovh
namespace: flux-system
dependsOn:
- name: cert-manager
namespace: cert-manager
values:
groupName: "${SECRET_DOMAIN}"
certManager:
namespace: cert-manager
serviceAccountName: cert-manager
issuers:
- name: letsencrypt-staging
create: true
kind: ClusterIssuer
acmeServerUrl: https://acme-staging-v02.api.letsencrypt.org/directory
email: "${SECRET_CLUSTER_DOMAIN_EMAIL}"
ovhEndpointName: ovh-eu
ovhAuthenticationRef:
applicationKeyRef:
name: ovh-credentials
key: applicationKey
applicationSecretRef:
name: ovh-credentials
key: applicationSecret
consumerKeyRef:
name: ovh-credentials
key: consumerKey
- name: letsencrypt-production
create: true
kind: ClusterIssuer
acmeServerUrl: https://acme-v02.api.letsencrypt.org/directory
email: "${SECRET_CLUSTER_DOMAIN_EMAIL}"
ovhEndpointName: ovh-eu
ovhAuthenticationRef:
applicationKeyRef:
name: ovh-credentials
key: applicationKey
applicationSecretRef:
name: ovh-credentials
key: applicationSecret
consumerKeyRef:
name: ovh-credentials
key: consumerKey
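Once these ClusterIssuers exist, workloads can request certificates against them with a standard cert-manager Certificate, applied via kubectl apply -f; a minimal sketch with hypothetical name, namespace, and DNS name:

    apiVersion: cert-manager.io/v1
    kind: Certificate
    metadata:
      name: example-tls
      namespace: default
    spec:
      secretName: example-tls
      issuerRef:
        kind: ClusterIssuer
        name: letsencrypt-staging
      dnsNames:
        - example.my-domain.tld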


@@ -0,0 +1,30 @@
kind: Secret
apiVersion: v1
metadata:
name: ovh-credentials
namespace: cert-manager
stringData:
applicationKey: ENC[AES256_GCM,data:UYBGsO4gGWA1iPUqVAYnjw==,iv:/rYA+o/EXOLsbU8WUnp53ejYgi+TFb3DJ/fJS6iUjAM=,tag:hEPzYgcefH5iJWS1bF6R5A==,type:str]
applicationSecret: ENC[AES256_GCM,data:QsTdVpgbp/CAqt0mZPRNDINMach/EiM/1+kbgEzxIqE=,iv:/CJVh2tT7wXAdeuxBHN5kM/LidhgGKCTW66hxTcx4QA=,tag:yLw4HpAx7RlZ11LMPMdXtg==,type:str]
consumerKey: ENC[AES256_GCM,data:OmI9kc0tNQWCpM+Bg0oQMdYwhZRsqQDZ87NFpkYFpMo=,iv:7elfo7xvxa57du6IjZRJejdpgIQiSjgoRqhWAtMLzXg=,tag:Zk36lNZ+EcZYAye1W+4gwA==,type:str]
type: Opaque
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSByMWQvSUhwYnFyMHJXVWxQ
cjllMGlCRnRwdGJZRU9DVGdMUHE5ZUQxUEVjCkJnY3NWeDg5MnZOQjN3RDVtOTN2
c1Z0OUNsSm5IZ0k0UGJXRVlVRnRwQzQKLS0tIEtDRGVyN1gyaU9wM3ZLczRVYnBQ
czlyZ2lrYk1LNktxTkZiNUdFb0xHblEKlGExd13zMg6MofRAz+GT9wKL/sEBI6XD
u+dQAsphIoPpptFY0IeehXTLBV8xK4p1Z1/qu6UgJOnQtb2KGYOOvQ==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2022-12-26T23:59:54Z"
mac: ENC[AES256_GCM,data:dnguY6zpQRkj3cV2+CzCdIldBTVGUSIMh5bKoRsJ/cYONp9LjpqGZSmuDfFNRVaWU293M+T12criNH7SndGpquw46YJT48S14g9vi6NeRhK6Rl0z2TbNbtm/7uIUkgmHy1aur8IxfdDdzBScIlq0nfjhcTyYz1RYw/K2bKTwvzA=,iv:TZS0p+IPWqEq9trZxs7FGY7kZ83EaijFH1Kw/IElgjg=,tag:AlIFWcQfDMC9h7sm2WI9zQ==,type:str]
pgp: []
encrypted_regex: ^(data|stringData)$
version: 3.7.3
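Only data/stringData values are encrypted per the encrypted_regex, so the rest of the manifest stays reviewable in git; editing or decrypting goes through the sops CLI (the file path is hypothetical):

    sops kubernetes/apps/cert-manager/cert-manager/webhook-ovh/secret.sops.yaml            # edit decrypted in $EDITOR, re-encrypt on save
    sops --decrypt kubernetes/apps/cert-manager/cert-manager/webhook-ovh/secret.sops.yaml  # print decrypted manifest to stdout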


@@ -0,0 +1,9 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# Pre Flux-Kustomizations
- ./namespace.yaml
# Flux-Kustomizations
- ./cert-manager/ks.yaml


@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: cert-manager
labels:
kustomize.toolkit.fluxcd.io/prune: disabled

Some files were not shown because too many files have changed in this diff.