mirror of https://github.com/auricom/home-cluster.git (synced 2025-12-23 16:05:23 +01:00)

✨ new talos cluster

25  .github/renovate.json5 (vendored)
@@ -32,18 +32,18 @@
     "fileMatch": ["ansible/.+/docker-compose.*\\.ya?ml(\\.j2)?$"]
   },
   "flux": {
-    "fileMatch": ["cluster/.+\\.ya?ml$"]
+    "fileMatch": ["kubernetes/.+\\.ya?ml$"]
   },
   "helm-values": {
-    "fileMatch": ["cluster/.+\\.ya?ml$"]
+    "fileMatch": ["kubernetes/.+\\.ya?ml$"]
   },
   "kubernetes": {
-    "fileMatch": ["cluster/.+\\.ya?ml$"]
+    "fileMatch": ["kubernetes/.+\\.ya?ml$"]
   },
   "regexManagers": [
     {
       "description": "Process CRD dependencies",
-      "fileMatch": ["cluster/.+\\.ya?ml$"],
+      "fileMatch": ["kubernetes/.+\\.ya?ml$"],
       "matchStrings": [
         // GitRepository where 'Git release/tag' matches 'Helm' version
         "registryUrl=(?<registryUrl>\\S+) chart=(?<depName>\\S+)\n.*?(?<currentValue>[^-\\s]*)\n",
@@ -54,7 +54,10 @@
     },
     {
       "description": "Process various dependencies",
-      "fileMatch": ["ansible/.+\\.ya?ml$", "cluster/.+\\.ya?ml$"],
+      "fileMatch": [
+        "infrastructure/ansible/.+\\.ya?ml$",
+        "kubernetes/.+\\.ya?ml$"
+      ],
       "matchStrings": [
         "datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)( versioning=(?<versioning>\\S+))?\n.*?\"(?<currentValue>.*)\"\n"
       ],
@@ -63,7 +66,7 @@
     },
     {
       "description": "Process raw GitHub URLs",
-      "fileMatch": ["cluster/.+\\.ya?ml$"],
+      "fileMatch": ["kubernetes/.+\\.ya?ml$"],
       "matchStrings": [
         "https:\\/\\/raw.githubusercontent.com\\/(?<depName>[\\w\\d\\-_]+\\/[\\w\\d\\-_]+)\\/(?<currentValue>[\\w\\d\\.\\-_]+)\\/.*"
       ],
@@ -86,13 +89,15 @@
     {
       "matchDatasources": ["docker"],
       "versioning": "loose",
-      "matchPackageNames": ["ghcr.io/onedr0p/qbittorrent", "docker.io/zedeus/nitter", "quay.io/invidious/invidious"]
+      "matchPackageNames": [
+        "ghcr.io/onedr0p/qbittorrent",
+        "docker.io/zedeus/nitter",
+        "quay.io/invidious/invidious"
+      ]
     },
     {
       "matchDatasources": ["docker"],
-      "matchPackageNames": [
-        "ghcr.io/linuxserver/calibre"
-      ],
+      "matchPackageNames": ["ghcr.io/linuxserver/calibre"],
       "versioning": "regex:^version-v?(?<major>\\d+)\\.(?<minor>\\d+)\\.(?<patch>\\d+)$"
     }
   ]
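Note: the "Process various dependencies" regexManager above keys off a comment annotation placed directly above a quoted version string. The pair used elsewhere in this repository (for example in the k3s group_vars removed further down in this commit) is the kind of input the matchString captures:

    # renovate: datasource=github-releases depName=k3s-io/k3s
    k3s_release_version: "v1.25.3+k3s1"

Renovate reads datasource/depName from the comment and treats the quoted value on the following line as currentValue.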
9  .github/renovate/autoMerge.json5 (vendored)
@@ -18,6 +18,15 @@
       "matchUpdateTypes": ["minor", "patch"],
       "matchPackageNames": ["ghcr.io/onedr0p/prowlarr-nightly"]
     },
+    {
+      "description": "Auto merge containers (patch only)",
+      "matchDatasources": ["docker"],
+      "automerge": true,
+      "automergeType": "branch",
+      "requiredStatusChecks": null,
+      "matchUpdateTypes": ["patch"],
+      "matchPackageNames": ["ghcr.io/auricom/kubectl"]
+    },
     {
       "description": "Auto merge GitHub Actions",
       "matchDatasources": ["github-tags"],
15  .sops.yaml
@@ -1,15 +1,20 @@
 creation_rules:
-  - path_regex: cluster/.*\.sops\.ya?ml
-    encrypted_regex: "^(data|stringData)$"
+  - path_regex: kubernetes/.*\.sops\.ya?ml
+    encrypted_regex: ^(data|stringData)$
     key_groups:
       - age:
           - age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
-  - path_regex: ansible/.*\.sops\.ya?ml
-    unencrypted_regex: "^(kind)$"
+  - path_regex: kubernetes/.*\.sops\.toml
     key_groups:
       - age:
           - age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
-  - path_regex: cluster/.*\.sops\.toml
+  - path_regex: infrastructure/ansible/.*\.sops\.ya?ml
+    unencrypted_regex: ^(kind)$
     key_groups:
       - age:
           - age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
+  - path_regex: .*\.sops\.ya?ml
+    key_groups:
+      - age:
+          - age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
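Note: SOPS applies the first creation_rule whose path_regex matches the file being encrypted, so rule order matters; the trailing .*\.sops\.ya?ml entry acts as a catch-all for anything outside kubernetes/ and infrastructure/ansible/. Hypothetical paths, to illustrate how the rules above would resolve:

    kubernetes/apps/example/secret.sops.yaml            -> rule 1 (data/stringData encrypted)
    kubernetes/bootstrap/example.sops.toml              -> rule 2
    infrastructure/ansible/host_vars/example.sops.yaml  -> rule 3 (kind left unencrypted)
    docs/example.sops.yaml                              -> rule 4 (catch-all)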
@@ -12,8 +12,8 @@ x-preconditions: &preconditions
     msg: "Claim '{{.CLAIM}}' in namespace '{{.NAMESPACE}}' not found"
     sh: kubectl get pvc -n {{.NAMESPACE}} {{.CLAIM}}
   - &has-restore-job-file
-    msg: "File '{{.PROJECT_DIR}}/hack/kopia-restore.yaml' not found"
-    sh: "test -f {{.PROJECT_DIR}}/hack/kopia-restore.yaml"
+    msg: "File '{{.PROJECT_DIR}}/kubernetes/tools/kopia-restore.yaml' not found"
+    sh: "test -f {{.PROJECT_DIR}}/kubernetes/tools/kopia-restore.yaml"

 x-vars: &vars
   NAMESPACE:
@@ -54,7 +54,7 @@ tasks:
       - flux -n {{.NAMESPACE}} suspend helmrelease {{.APP}}
       - kubectl -n {{.NAMESPACE}} scale {{.NAME}} --replicas 0
       - kubectl -n {{.NAMESPACE}} wait pod --for delete --selector="app.kubernetes.io/name={{.APP}}" --timeout=2m
-      - envsubst < <(cat ./hack/kopia-restore.yaml) | kubectl apply -f -
+      - envsubst < <(cat ./kubernetes/tools/kopia-restore.yaml) | kubectl apply -f -
       - sleep 2
       - kubectl -n {{.NAMESPACE}} wait job --for condition=complete {{.APP}}-{{.CLAIM}}-restore --timeout={{.TIMEOUT | default "60m"}}
       - flux -n {{.NAMESPACE}} resume helmrelease {{.APP}}
@@ -1,14 +0,0 @@
----
-# -- Encapsulation type
-calico_encapsulation: "None"
-# -- BGP Peer IP
-# -- (usually your router IP address)
-calico_bgp_peer_ip: 192.168.8.1
-# -- BGP Autonomous System Number
-# -- (must be the same across all BGP peers)
-calico_bgp_as_number: 64512
-# -- BGP Network you want services to consume
-# -- (this network should not exist or be defined anywhere in your network)
-calico_bgp_external_ips: 192.168.169.0/24
-# -- CIDR of the host node interface Calico should use
-calico_node_cidr: 10.69.0.0/16
@@ -1,79 +0,0 @@
----
-#
-# Below vars are for the xanmanning.k3s role
-# ...see https://github.com/PyratLabs/ansible-role-k3s#globalcluster-variables
-#
-
-# Use a specific version of k3s
-# renovate: datasource=github-releases depName=k3s-io/k3s
-k3s_release_version: "v1.25.3+k3s1"
-
-# -- Install using hard links rather than symbolic links.
-# ...if you are using the system-upgrade-controller you will need to
-# use hard links rather than symbolic links as the controller will
-# not be able to follow symbolic links.
-k3s_install_hard_links: true
-
-# -- Escalate user privileges for all tasks.
-k3s_become: true
-
-# -- Enable debugging
-k3s_debug: false
-
-# -- Enabled embedded etcd
-# k3s_etcd_datastore: false
-
-# -- Enable for single or even number of masters
-k3s_use_unsupported_config: false
-
-# -- /var/lib/rancher/k3s/server/manifests
-k3s_server_manifests_templates:
-  - "calico/calico-installation.yaml.j2"
-  - "calico/calico-bgpconfiguration.yaml.j2"
-  - "calico/calico-bgppeer.yaml.j2"
-
-# -- /var/lib/rancher/k3s/server/manifests
-k3s_server_manifests_urls:
-  - url: https://docs.projectcalico.org/archive/v3.24/manifests/tigera-operator.yaml
-    filename: tigera-operator.yaml
-
-# -- /etc/rancher/k3s/registries.yaml
-# k3s_registries:
-#   mirrors:
-#     "docker.io":
-#       endpoint:
-#         - "https://mirror.{{ SECRET_PRIVATE_DOMAIN }}"
-#     "*":
-#       endpoint:
-#         - "https://mirror.{{ SECRET_PRIVATE_DOMAIN }}"
-#   config:
-#     "https://registry.{{ SECRET_PRIVATE_DOMAIN }}":
-#       auth:
-#         username: "{{ SECRET_NEXUS_USERNAME }}"
-#         password: "{{ SECRET_NEXUS_PASSWORD }}"
-
-timezone: Europe/Paris
-
-public_ssh_keys:
-  - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL+GMHgvbtf6f7xUMAQR+vZFfD/mIIfIDNX5iP8tDRXZ claude@claude-thinkpad-fedora"
-  - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINo7E0oAOzaq0XvUHkWvZSC8u1XxX8dDCq3bSyK2BCen claude@claude-fixe-fedora"
-
-packages:
-  - "https://download1.rpmfusion.org/free/fedora/rpmfusion-free-release-{{ ansible_distribution_major_version }}.noarch.rpm"
-  - "https://download1.rpmfusion.org/nonfree/fedora/rpmfusion-nonfree-release-{{ ansible_distribution_major_version }}.noarch.rpm"
-  - dnf-automatic
-  - dnf-plugin-system-upgrade
-  - dnf-utils
-  - fish
-  - hdparm
-  - htop
-  - intel-gpu-tools
-  - ipvsadm
-  - lm_sensors
-  - nano
-  - nvme-cli
-  - python3-libselinux
-  - socat
-  - cockpit-pcp
-
-k3s_registration_address: 192.168.9.100
@@ -1,73 +0,0 @@
----
-# https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/
-# https://github.com/PyratLabs/ansible-role-k3s#server-control-plane-configuration
-
-# Define the host as control plane nodes
-k3s_control_node: true
-
-k3s_etcd_datastore: false
-
-# k3s settings for all control-plane nodes
-k3s_server:
-  node-ip: "{{ ansible_host }}"
-  tls-san:
-    # # kube-vip
-    # - "{{ kubevip_address }}"
-    # haproxy
-    - "{{ k3s_registration_address }}"
-  docker: false
-  flannel-backend: "none" # This needs to be in quotes
-  disable:
-    - flannel
-    - traefik
-    - servicelb
-    - metrics-server
-    - local-storage
-  disable-network-policy: true
-  disable-cloud-controller: true
-  # Network CIDR to use for pod IPs
-  cluster-cidr: "10.95.0.0/16"
-  # Network CIDR to use for service IPs
-  service-cidr: "10.96.0.0/16"
-  # Required to monitor component with kube-prometheus-stack
-  # etcd-expose-metrics: true
-  kubelet-arg:
-    # Enable Alpha/Beta features
-    - "feature-gates=EphemeralContainers=true,MixedProtocolLBService=true,ReadWriteOncePod=true"
-    # Allow pods to be rescheduled quicker in the case of a node failure
-    # https://github.com/k3s-io/k3s/issues/1264
-    - "node-status-update-frequency=4s"
-  kube-controller-manager-arg:
-    # Enable Alpha/Beta features
-    - "feature-gates=EphemeralContainers=true,MixedProtocolLBService=true,ReadWriteOncePod=true"
-    # Required to monitor component with kube-prometheus-stack
-    - "bind-address=0.0.0.0"
-    # Allow pods to be rescheduled quicker in the case of a node failure
-    # https://github.com/k3s-io/k3s/issues/1264
-    - "node-monitor-period=4s"
-    - "node-monitor-grace-period=16s"
-    - "pod-eviction-timeout=20s"
-  kube-proxy-arg:
-    # Enable Alpha/Beta features
-    - "feature-gates=EphemeralContainers=true,MixedProtocolLBService=true,ReadWriteOncePod=true"
-    # Required to monitor component with kube-prometheus-stack
-    - "metrics-bind-address=0.0.0.0"
-  kube-scheduler-arg:
-    # Enable Alpha/Beta features
-    - "feature-gates=EphemeralContainers=true,MixedProtocolLBService=true,ReadWriteOncePod=true"
-    # Required to monitor component with kube-prometheus-stack
-    - "bind-address=0.0.0.0"
-  kube-apiserver-arg:
-    # Enable Alpha/Beta features
-    - "feature-gates=EphemeralContainers=true,MixedProtocolLBService=true,ReadWriteOncePod=true"
-    # Required for HAProxy health-checks
-    - "anonymous-auth=true"
-    # Allow pods to be rescheduled quicker in the case of a node failure
-    # https://github.com/k3s-io/k3s/issues/1264
-    - "default-not-ready-toleration-seconds=20"
-    - "default-unreachable-toleration-seconds=20"
-  # Stop k3s control plane having workloads scheduled on them
-  node-taint:
-    - "node-role.kubernetes.io/control-plane:NoSchedule"
-  node-label:
-    - "upgrade.cattle.io/plan=k3s-server"
@@ -1,19 +0,0 @@
----
-# https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/
-# https://github.com/PyratLabs/ansible-role-k3s#agent-worker-configuration
-
-# Don't define the host as control plane nodes
-k3s_control_node: false
-
-# k3s settings for all worker nodes
-k3s_agent:
-  node-ip: "{{ ansible_host }}"
-  kubelet-arg:
-    # Enable Alpha/Beta features
-    - "feature-gates=EphemeralContainers=true,MixedProtocolLBService=true,ReadWriteOncePod=true"
-    # Allow pods to be rescheduled quicker in the case of a node failure
-    # https://github.com/k3s-io/k3s/issues/1264
-    - "node-status-update-frequency=4s"
-    - "max-pods=150"
-  node-label:
-    - "upgrade.cattle.io/plan=k3s-agent"
@@ -1,23 +0,0 @@
-kind: Secret
-ansible_password: ENC[AES256_GCM,data:NTaCi8mqE7kAQA==,iv:yfHBgrBCf2CqWPyuVTKSwH/WUy6bkgiSoyL4hWQHG7s=,tag:e3311IReXe0RHGgttNg3pg==,type:str]
-ansible_become_pass: ENC[AES256_GCM,data:ChsZxKZ1qvICFA==,iv:vuc4eZG4Ls2CiSP/vLazCy/sZkiPjjpGPZr97CvIoX4=,tag:onYhcvFkmAMN6PTFSp0Ikg==,type:str]
-sops:
-    kms: []
-    gcp_kms: []
-    azure_kv: []
-    hc_vault: []
-    age:
-        - recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
-          enc: |
-            -----BEGIN AGE ENCRYPTED FILE-----
-            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB5azdoWUV2SWdxaDl1NXVF
-            U1pvRjBncEpzM2E4TEs1MGlRbTRseG1zS0dNCnF6QmRmNU1iZ0J5K28rSlB4emFF
-            ODlnU1lXVFZrTHlyTEg5VlFXUERJNGcKLS0tIGhMQUhsa0xaUVU0RTRpbkx0Vk5r
-            NjJBcHVOSmUvNkt3b3I3dmJwTlJWS3MKw/hRA/oh1fiWts2aqbzTV3TTTcnSk3mi
-            fsw9jQF3QRL5PGbdT6iz7j58IokV32ilJubQHtfrxus29hd/qAn0yQ==
-            -----END AGE ENCRYPTED FILE-----
-    lastmodified: "2022-07-03T14:40:36Z"
-    mac: ENC[AES256_GCM,data:c5yyBdFVs1wqDe8nsQOLeSzFv4QJ2n+VbrSf0dP5oW8593WBcdI8fXn9Q8fdY+wN2BOLn5vRdXBx7btlw0OrEIOOZ/Wz9tUxqIEUFZU6tT4TIB9g5jEqMgs2eKJmgLUoW/fcPC6QJ8ATApF6y8lI4RIV2LOItqK4AUpiVy4E2SU=,iv:kfrYGRaKY37OEl8ilrFFkRkItHpz/1VuAgWimjhujGA=,tag:STGaUOdwNlOAMcbU3Po1HQ==,type:str]
-    pgp: []
-    unencrypted_regex: ^(kind)$
-    version: 3.7.3
@@ -1,23 +0,0 @@
-kind: Secret
-ansible_password: ENC[AES256_GCM,data:AihMvIUjgEpCjg==,iv:Bk9uFrbhOvlQvoYaJz+JhtMJTAiQ0u9TcaS8eKO0+fE=,tag:R2sLCjH/my9kcsu4Ddg9jg==,type:str]
-ansible_become_pass: ENC[AES256_GCM,data:nR/Wkn8NqM3vaA==,iv:iV8c6Qg59qKtHoaQReUTX+KDB+iSboxpSM/K8+gcZvQ=,tag://89MQ4jmQPib/D595YTbA==,type:str]
-sops:
-    kms: []
-    gcp_kms: []
-    azure_kv: []
-    hc_vault: []
-    age:
-        - recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
-          enc: |
-            -----BEGIN AGE ENCRYPTED FILE-----
-            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlZSs4aVZ5VGdyVllEMXl3
-            c2NGS2d0dkd4NVZlSVlBd2V3RVEzQ2FiaHlrCld0SkNKUjcvRHNEQ1dZZFUzM014
-            ejd5QW5uUzJmMERLR2h4R2M3UmdKWU0KLS0tIFdYOStkVG40TXIzVjRkK0RzZStj
-            UmhGcmVidTVKbWQ5VVpHSklYN2NyWGMKsfv/KG02qk3EJoNJQ9HNl1iyfyic6Puf
-            5owrc62PfohWnLVQby9SaVK80PJVaMRU/kcHIJvbt1Iv2f47qpKczg==
-            -----END AGE ENCRYPTED FILE-----
-    lastmodified: "2022-07-03T14:40:40Z"
-    mac: ENC[AES256_GCM,data:6BqgWJTOzQKwu6Mr7/2WemzOmFNnIilSLH9LPG01UtvaO7FnOQXV1ezgYntKdSXGJWza/pvvqDURaBT7O7Rwv5kR25B6Fo3XWdVSuTLf+N4fGnWKiINaa6UjZhosm5KLs7VB0I3eiBTcHrxqb9jupgPkUErwy0H0LT8yLYRGpe8=,iv:kXeAB7zUoZoZPgEntWV80DNKSEiFiH4xQtbYpStO36U=,tag:gWusG9MGl+bYcjYfQGMbWA==,type:str]
-    pgp: []
-    unencrypted_regex: ^(kind)$
-    version: 3.7.3
@@ -1,23 +0,0 @@
-kind: Secret
-ansible_password: ENC[AES256_GCM,data:495JSVNY5Rn0hg==,iv:ZvJb1M4Ys8FkQpekm5jnGWKE5q63Z44OUhhtYWsJUvQ=,tag:KxgvJbsEMsdYu59yCOCjMg==,type:str]
-ansible_become_pass: ENC[AES256_GCM,data:O8lTma7A2n6+5g==,iv:ggmSecFPtTI9vy81of5I6AHnRX2YWOw0VtVldv4PZmo=,tag:IfIuN8xcKHBF6Ojlmki5Tw==,type:str]
-sops:
-    kms: []
-    gcp_kms: []
-    azure_kv: []
-    hc_vault: []
-    age:
-        - recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
-          enc: |
-            -----BEGIN AGE ENCRYPTED FILE-----
-            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB0d21nNHZQRkloNnd1M2xF
-            RlJCUzBZK04rQ1RSa0hFSXUrVTlzK0V1dEdjCkg0ZnVJNGJOZjN3RlZ2RGRmRFdV
-            akRPQzhwN3NqNHJlK0o1VVFncDVnd1kKLS0tIDhhRGlhNXJmanM5amR6eHZERElj
-            RndiYkJFaWZuUmVIU3JwSWYzTFZlS3cKHFe4yce/091eEvtrSBYggNgyO88eHA4s
-            3TvjHmS7tLv7BnBAT9LLcQVSIW0UOszzF3PvVWIqFqzB/wn0j370kw==
-            -----END AGE ENCRYPTED FILE-----
-    lastmodified: "2022-07-03T14:40:42Z"
-    mac: ENC[AES256_GCM,data:qFIsrbqI+c3fe88H40KkWhwOnZ2aePoorpfxeTjhBtPviT4jBMvIGYZKULCehcdULNMxe7QWuPWsdYY/o5ruqZC49/OrV9qI0XVU6gdiCsM1jcXXiyFkVFfMoMhj5c5yAIMoUKRWbZe2kFtJxaG7ng8VusMgCc9f7LofWiFToVo=,iv:BI2hEL/AsaZoZ4RL7QNy4vins877XgZwxCdJ0ciFEUo=,tag:7tOEfmkFEApTy5wIgJLEBA==,type:str]
-    pgp: []
-    unencrypted_regex: ^(kind)$
-    version: 3.7.3
@@ -1,23 +0,0 @@
-kind: Secret
-ansible_password: ENC[AES256_GCM,data:n0ASYgah4hAFvw==,iv:P0OPjAGh4AWkw0HUpBNEom6twa3sAXsh0Ei+2UDj/qo=,tag:GNcmaw2BQr5TV755NL/0vw==,type:str]
-ansible_become_pass: ENC[AES256_GCM,data:a2wZnzPgf91HvQ==,iv:8wIjFmwSkYZIZmLLhvZTG1EnMmNffuSoPkpao6Kk9wI=,tag:gta1yPH1tRzBdViIO9WOAg==,type:str]
-sops:
-    kms: []
-    gcp_kms: []
-    azure_kv: []
-    hc_vault: []
-    age:
-        - recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
-          enc: |
-            -----BEGIN AGE ENCRYPTED FILE-----
-            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBQL2pJWVRDU0lBVVgxNkd6
-            MStqdFRFNGdwTEpUWUxEaVVMUVBkY2RXWFUwCmJmbGZnMzVPZjhQMWh0eWhybXdi
-            K1FIa1YrNDZjMnhONDBiSEFtTW80WlkKLS0tIHJJTFpINUowclNUZXVsa2I1Vjdw
-            NkhyZm5SVnlBYWxlajh6NjV0OVBCSE0Kl6ovgsGkzq4XetwG5b77mvztpa3bD5ej
-            mWlPbSV66yw4eENVuDtZRX5/lrnbW7EqkwjfGoEJ9YGA7ya0G6IVQw==
-            -----END AGE ENCRYPTED FILE-----
-    lastmodified: "2022-07-03T14:40:44Z"
-    mac: ENC[AES256_GCM,data:/AA8sbAxsYhGAad8/ymYq0YgzwmNvnnwK+p9J7+NUpFC9YGWwuR/dV8oxKzqOs/zEzFTwyBTvOrGeQ59xyJ/Id/xSt5Av0FTmrOXQxFwIOsMUsH5RP8khQpp9yO1c2cvxwNLi1oWGzLLE63Zl2JwutQdTVH0KgibPhtdL0sV8eQ=,iv:rTpWgrMAZrCymFqKGcEGOyQJdPAw/SmeW8vdVNX/Ptg=,tag:rlg3dcQhVwcXUKkEc4Jdww==,type:str]
-    pgp: []
-    unencrypted_regex: ^(kind)$
-    version: 3.7.3
@@ -1,88 +0,0 @@
----
-- hosts:
-    - master
-    - worker
-  become: true
-  gather_facts: true
-  any_errors_fatal: true
-  pre_tasks:
-    - name: Pausing for 5 seconds...
-      ansible.builtin.pause:
-        seconds: 5
-  tasks:
-    - name: Check if cluster is installed
-      check_mode: false
-      ansible.builtin.stat:
-        path: "/etc/rancher/k3s/config.yaml"
-      register: k3s_check_installed
-
-    - name: Set manifest facts
-      ansible.builtin.set_fact:
-        k3s_server_manifests_templates: []
-        k3s_server_manifests_urls: []
-      when: k3s_check_installed.stat.exists
-
-    - name: Install Kubernetes
-      ansible.builtin.include_role:
-        name: xanmanning.k3s
-        public: true
-      vars:
-        k3s_state: installed
-
-    - name: Get absolute path to this Git repository
-      delegate_to: localhost
-      become: false
-      run_once: true
-      check_mode: false
-      ansible.builtin.command: |-
-        git rev-parse --show-toplevel
-      register: repo_abs_path
-      changed_when: "repo_abs_path.rc != 2"
-
-    - name: Copy kubeconfig to provision folder
-      run_once: true
-      ansible.builtin.fetch:
-        src: "/etc/rancher/k3s/k3s.yaml"
-        dest: "{{ repo_abs_path.stdout }}/provision/kubeconfig"
-        flat: true
-      when:
-        - k3s_control_node is defined
-        - k3s_control_node
-
-    - name: Update kubeconfig with the correct IPv4 address
-      delegate_to: localhost
-      become: false
-      run_once: true
-      ansible.builtin.replace:
-        path: "{{ repo_abs_path.stdout }}/provision/kubeconfig"
-        regexp: "https://127.0.0.1:6443"
-        replace: "https://{{ k3s_registration_address }}:6443"
-
-    # Cleaning up the manifests from the /var/lib/rancher/k3s/server/manifests
-    # directory is needed because k3s has an awesome
-    # "feature" to always deploy these on restarting
-    # the k3s systemd service. Removing them does
-    # not uninstall the manifests.
-
-    # Removing them means we can manage the lifecycle
-    # of these components outside of the
-    # /var/lib/rancher/k3s/server/manifests directory
-
-    # FIXME(ansible): Check for deployments to be happy rather than waiting
-    - name: Wait for k3s to finish installing the deployed manifests
-      ansible.builtin.wait_for:
-        timeout: 15
-      when: k3s_server_manifests_templates | length > 0
-        or k3s_server_manifests_dir | length > 0
-
-    - name: Remove deployed manifest templates
-      ansible.builtin.file:
-        path: "{{ k3s_server_manifests_dir }}/{{ item | basename | regex_replace('\\.j2$', '') }}"
-        state: absent
-      loop: "{{ k3s_server_manifests_templates | default([]) }}"
-
-    - name: Remove deployed manifest urls
-      ansible.builtin.file:
-        path: "{{ k3s_server_manifests_dir }}/{{ item.filename }}"
-        state: absent
-      loop: "{{ k3s_server_manifests_urls | default([]) }}"
@@ -1,41 +0,0 @@
----
-- hosts:
-    - master
-    - worker
-  become: true
-  gather_facts: true
-  any_errors_fatal: true
-  vars_prompt:
-    - name: nuke
-      prompt: |-
-        Are you sure you want to nuke this cluster?
-        Type YES I WANT TO DESTROY THIS CLUSTER to proceed
-      default: "n"
-      private: false
-  pre_tasks:
-    - name: Check for confirmation
-      ansible.builtin.fail:
-        msg: Aborted nuking the cluster
-      when: nuke != 'YES I WANT TO DESTROY THIS CLUSTER'
-
-    - name: Pausing for 5 seconds...
-      ansible.builtin.pause:
-        seconds: 5
-  tasks:
-    - name: Uninstall k3s
-      ansible.builtin.include_role:
-        name: xanmanning.k3s
-        public: true
-      vars:
-        k3s_state: uninstalled
-    - name: Gather list of CNI files
-      ansible.builtin.find:
-        paths: /etc/cni/net.d
-        patterns: "*"
-        hidden: true
-      register: directory_contents
-    - name: Delete CNI files
-      ansible.builtin.file:
-        path: "{{ item.path }}"
-        state: absent
-      loop: "{{ directory_contents.files }}"
@@ -1,184 +0,0 @@
----
-- hosts:
-    - master
-    - worker
-  become: true
-  gather_facts: true
-  serial: 1
-  any_errors_fatal: true
-  pre_tasks:
-    - name: Pausing for 5 seconds...
-      ansible.builtin.pause:
-        seconds: 5
-  tasks:
-    - name: Locale
-      block:
-        - name: Locale | Set timezone
-          community.general.timezone:
-            name: "{{ timezone | default('Europe/Paris') }}"
-    - name: Networking
-      block:
-        - name: Networking | Set hostname to inventory hostname
-          ansible.builtin.hostname:
-            name: "{{ inventory_hostname }}"
-        - name: Networking | Update /etc/hosts to include inventory hostname
-          ansible.builtin.blockinfile:
-            path: /etc/hosts
-            block: |
-              127.0.1.1 {{ inventory_hostname }}
-    - name: Packages
-      block:
-        - name: Packages | Improve dnf performance
-          ansible.builtin.blockinfile:
-            path: /etc/dnf/dnf.conf
-            block: |
-              defaultyes=True
-              deltarpm=True
-              install_weak_deps=False
-              max_parallel_downloads={{ ansible_processor_vcpus | default('8') }}
-        - name: Packages | Import rpmfusion keys
-          ansible.builtin.rpm_key:
-            state: present
-            key: "{{ item }}"
-          loop:
-            - https://rpmfusion.org/keys?action=AttachFile&do=get&target=RPM-GPG-KEY-rpmfusion-free-fedora-2020
-            - https://rpmfusion.org/keys?action=AttachFile&do=get&target=RPM-GPG-KEY-rpmfusion-nonfree-fedora-2020
-        - name: Packages | Install required packages
-          ansible.builtin.dnf:
-            name: "{{ packages | default([]) }}"
-            state: present
-            update_cache: true
-        - name: Packages | Remove leaf packages
-          ansible.builtin.dnf:
-            autoremove: true
-        - name: Packages | Enable automatic download of updates
-          ansible.builtin.systemd:
-            service: dnf-automatic-download.timer
-            enabled: true
-            state: started
-        - name: Packages | Enable cockpit
-          ansible.builtin.systemd:
-            service: cockpit.socket
-            enabled: true
-            state: started
-    - name: User Configuration
-      block:
-        - name: User Configuration | Change shell to fish
-          ansible.builtin.user:
-            name: "{{ item }}"
-            shell: /usr/bin/fish
-          loop:
-            - root
-            - fedora
-        - name: User Configuration | Disable password sudo
-          ansible.builtin.lineinfile:
-            dest: /etc/sudoers
-            state: present
-            regexp: "^%wheel"
-            line: "%wheel ALL=(ALL) NOPASSWD: ALL"
-            validate: visudo -cf %s
-          become: true
-        - name: User Configuration | Add additional SSH public keys
-          ansible.posix.authorized_key:
-            user: "{{ ansible_user }}"
-            key: "{{ item }}"
-          loop: "{{ public_ssh_keys | default([]) }}"
-    - name: System Configuration (1)
-      block:
-        - name: System Configuration (1) | Configure smartd
-          ansible.builtin.copy:
-            dest: /etc/smartd.conf
-            mode: 0644
-            content: DEVICESCAN -a -o on -S on -n standby,q -s (S/../.././02|L/../../6/03) -W 4,35,40
-          notify: Restart smartd
-        - name: System Configuration (1) | Disable firewalld
-          ansible.builtin.systemd:
-            service: firewalld.service
-            enabled: false
-            masked: true
-            state: stopped
-        - name: System Configuration (1) | Enable fstrim
-          ansible.builtin.systemd:
-            service: fstrim.timer
-            enabled: true
-        - name: System Configuration (1) | Enable chronyd
-          ansible.builtin.systemd:
-            service: chronyd
-            enabled: true
-    - name: System Configuration (2)
-      block:
-        - name: System Configuration (2) | Enable kernel modules now
-          community.general.modprobe:
-            name: "{{ item }}"
-            state: present
-          loop: [br_netfilter, overlay, rbd]
-        - name: System Configuration (2) | Enable kernel modules on boot
-          ansible.builtin.copy:
-            mode: 0644
-            content: "{{ item }}"
-            dest: "/etc/modules-load.d/{{ item }}.conf"
-          loop: [br_netfilter, overlay, rbd]
-        - name: System Configuration (2) | Set sysctls
-          ansible.posix.sysctl:
-            name: "{{ item.key }}"
-            value: "{{ item.value }}"
-            sysctl_file: /etc/sysctl.d/99-kubernetes.conf
-            reload: true
-          with_dict: "{{ sysctl_config }}"
-          vars:
-            sysctl_config:
-              net.ipv4.ip_forward: 1
-              net.ipv4.conf.all.forwarding: 1
-              net.ipv4.conf.all.rp_filter: 0
-              net.ipv4.conf.default.rp_filter: 0
-              net.ipv6.conf.all.forwarding: 1
-              net.bridge.bridge-nf-call-iptables: 1
-              net.bridge.bridge-nf-call-ip6tables: 1
-              fs.inotify.max_user_watches: 524288
-              fs.inotify.max_user_instances: 512
-        - name: System Configuration (2) | Disable swap
-          ansible.builtin.dnf:
-            name: zram-generator-defaults
-            state: absent
-        - name: System Configuration (2) | Permissive SELinux
-          ansible.posix.selinux:
-            state: permissive
-            policy: targeted
-        - name: System Configuration (2) | Disable mitigations
-          ansible.builtin.replace:
-            path: /etc/default/grub
-            regexp: '^(GRUB_CMDLINE_LINUX=(?:(?![" ]{{ item.key | regex_escape }}=).)*)(?:[" ]{{ item.key | regex_escape }}=\S+)?(.*")$'
-            replace: '\1 {{ item.key }}={{ item.value }}\2'
-          with_dict: "{{ grub_config }}"
-          vars:
-            grub_config:
-              mitigations: "off"
-          register: grub_status
-        - name: System Configuration (2) | Reconfigure grub and initramfs
-          ansible.builtin.command: "{{ item }}"
-          loop:
-            - grub2-mkconfig -o /boot/grub2/grub.cfg
-            - dracut --force --regenerate-all -v
-          when: grub_status.changed
-    - name: System Configuration (3) | NetworkManager - Calico fix
-      ansible.builtin.blockinfile:
-        path: /etc/NetworkManager/conf.d/calico.conf
-        create: true
-        block: |
-          [keyfile]
-          unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico;interface-name:vxlan-v6.calico;interface-name:wireguard.cali;interface-name:wg-v6.cali
-    - name: System Configuration (3) | NetworkManager - RX Ring buffer size check
-      ansible.builtin.command:
-        cmd: "cat /etc/NetworkManager/system-connections/eno1.nmconnection"
-      register: rx_ring_cat
-      changed_when: false
-    - name: System Configuration (3) | NetworkManager - RX Ring buffer size
-      ansible.builtin.command:
-        cmd: "nmcli connection modify eno1 ethtool.ring-rx 1024"
-      when: rx_ring_cat.stdout.find("ring-rx=1024") == -1
-
-      # notify: Reboot
-  handlers:
-    - name: Reboot
-      ansible.builtin.reboot:
-        msg: Rebooting nodes
@@ -1,15 +0,0 @@
----
-- hosts:
-    - master
-    - worker
-  become: true
-  gather_facts: true
-  any_errors_fatal: true
-  pre_tasks:
-    - name: Pausing for 5 seconds...
-      ansible.builtin.pause:
-        seconds: 5
-  tasks:
-    - name: Reboot
-      ansible.builtin.reboot:
-        msg: Rebooting nodes
@@ -1,36 +0,0 @@
----
-- hosts:
-    - worker
-  become: true
-  gather_facts: true
-  any_errors_fatal: true
-  pre_tasks:
-    - name: Pausing for 5 seconds...
-      ansible.builtin.pause:
-        seconds: 5
-  tasks:
-    - name: Remove /var/lib/rook
-      ansible.builtin.file:
-        state: absent
-        path: "/var/lib/rook"
-    - name: Zap the drives
-      ansible.builtin.shell: "sgdisk --zap-all {{ item }} || true"
-      register: rc
-      changed_when: "rc.rc != 2"
-      loop:
-        - "{{ rook_devices | default([]) }}"
-    - name: Remove lvm partitions
-      ansible.builtin.shell: "{{ item }}"
-      loop:
-        - ls /dev/mapper/ceph--* | xargs -I% -- fuser --kill %
-        - ls /dev/mapper/ceph--* | xargs -I% -- dmsetup clear %
-        - ls /dev/mapper/ceph--* | xargs -I% -- dmsetup remove -f %
-        - ls /dev/mapper/ceph--* | xargs -I% -- rm -rf %
-      register: rc
-      changed_when: "rc.rc != 2"
-    - name: Wipe the block device
-      ansible.builtin.command: "wipefs -af {{ item }}"
-      register: rc
-      changed_when: "rc.rc != 2"
-      with_items:
-        - "{{ rook_devices | default([]) }}"
@@ -1,9 +0,0 @@
----
-apiVersion: crd.projectcalico.org/v1
-kind: BGPConfiguration
-metadata:
-  name: default
-spec:
-  asNumber: {{ calico_bgp_as_number }}
-  serviceExternalIPs:
-    - cidr: "{{ calico_bgp_external_ips }}"
@@ -1,8 +0,0 @@
----
-apiVersion: crd.projectcalico.org/v1
-kind: BGPPeer
-metadata:
-  name: global
-spec:
-  peerIP: {{ calico_bgp_peer_ip }}
-  asNumber: {{ calico_bgp_as_number }}
@@ -1,18 +0,0 @@
----
-apiVersion: operator.tigera.io/v1
-kind: Installation
-metadata:
-  name: default
-spec:
-  registry: quay.io
-  imagePath: calico
-  calicoNetwork:
-    # Note: The ipPools section cannot be modified post-install.
-    ipPools:
-      - blockSize: 26
-        cidr: "{{ k3s_server['cluster-cidr'] }}"
-        encapsulation: "{{ calico_encapsulation }}"
-        natOutgoing: Enabled
-        nodeSelector: all()
-  nodeMetricsPort: 9091
-  typhaMetricsPort: 9093
@@ -1,8 +0,0 @@
----
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-namespace: system-upgrade
-resources:
-  # renovate: datasource=docker image=rancher/system-upgrade-controller
-  - https://github.com/rancher/system-upgrade-controller/releases/download/v0.9.1/crd.yaml
-  - system-upgrade-controller
@@ -1,22 +0,0 @@
----
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
-  - github.com/rancher/system-upgrade-controller?ref=v0.9.1
-  - plans
-images:
-  - name: rancher/system-upgrade-controller
-    newTag: v0.9.1
-patchesStrategicMerge:
-  # Delete namespace resource
-  - ./system-upgrade-patches.yaml
-  # Add labels
-  - |-
-    apiVersion: apps/v1
-    kind: Deployment
-    metadata:
-      name: system-upgrade-controller
-      namespace: system-upgrade
-      labels:
-        app.kubernetes.io/name: system-upgrade-controller
-        app.kubernetes.io/instance: system-upgrade-controller
@@ -1,21 +0,0 @@
----
-apiVersion: upgrade.cattle.io/v1
-kind: Plan
-metadata:
-  name: k3s-agent
-  namespace: system-upgrade
-  labels:
-    k3s-upgrade: agent
-spec:
-  # renovate: datasource=github-releases depName=k3s-io/k3s
-  version: "v1.25.3+k3s1"
-  serviceAccountName: system-upgrade
-  concurrency: 1
-  nodeSelector:
-    matchExpressions:
-      - { key: node-role.kubernetes.io/control-plane, operator: DoesNotExist }
-  prepare:
-    image: rancher/k3s-upgrade
-    args: ["prepare", "k3s-server"]
-  upgrade:
-    image: rancher/k3s-upgrade
@@ -1,22 +0,0 @@
----
-apiVersion: upgrade.cattle.io/v1
-kind: Plan
-metadata:
-  name: k3s-server
-  namespace: system-upgrade
-  labels:
-    k3s-upgrade: server
-spec:
-  # renovate: datasource=github-releases depName=k3s-io/k3s
-  version: "v1.25.3+k3s1"
-  serviceAccountName: system-upgrade
-  concurrency: 1
-  cordon: true
-  nodeSelector:
-    matchExpressions:
-      - { key: node-role.kubernetes.io/control-plane, operator: Exists }
-  tolerations:
-    - key: node-role.kubernetes.io/control-plane
-      effect: NoSchedule
-  upgrade:
-    image: rancher/k3s-upgrade
@@ -1,9 +0,0 @@
----
-# Namespace should already exist
-# Delete the system-upgrade namespace
-# from the kustomization
-$patch: delete
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: system-upgrade
@@ -1,66 +0,0 @@
----
-apiVersion: helm.toolkit.fluxcd.io/v2beta1
-kind: HelmRelease
-metadata:
-  name: vector-agent
-  namespace: monitoring
-spec:
-  interval: 15m
-  chart:
-    spec:
-      chart: vector
-      version: 0.17.0
-      sourceRef:
-        kind: HelmRepository
-        name: vector-charts
-        namespace: flux-system
-  install:
-    createNamespace: true
-    remediation:
-      retries: 5
-  upgrade:
-    remediation:
-      retries: 5
-  dependsOn:
-    - name: loki
-      namespace: monitoring
-    - name: vector-aggregator
-      namespace: monitoring
-  values:
-    image:
-      repository: timberio/vector
-      tag: 0.25.1-debian
-    role: Agent
-    customConfig:
-      data_dir: /vector-data-dir
-      api:
-        enabled: false
-      sources:
-        journal_logs:
-          type: journald
-          journal_directory: /var/log/journal
-        kubernetes_logs:
-          type: kubernetes_logs
-          pod_annotation_fields:
-            container_image: container_image
-            container_name: container_name
-            pod_annotations: pod_annotations
-            pod_labels: pod_labels
-            pod_name: pod_name
-      sinks:
-        loki_journal_sink:
-          type: vector
-          inputs:
-            - journal_logs
-          address: vector-aggregator:6000
-          version: "2"
-        loki_kubernetes_sink:
-          type: vector
-          inputs:
-            - kubernetes_logs
-          address: vector-aggregator:6010
-          version: "2"
-    service:
-      enabled: false
-    securityContext:
-      privileged: true
@@ -1,179 +0,0 @@
----
-apiVersion: helm.toolkit.fluxcd.io/v2beta1
-kind: HelmRelease
-metadata:
-  name: vector-aggregator
-  namespace: monitoring
-spec:
-  interval: 15m
-  chart:
-    spec:
-      chart: vector
-      version: 0.17.0
-      sourceRef:
-        kind: HelmRepository
-        name: vector-charts
-        namespace: flux-system
-  install:
-    createNamespace: true
-    remediation:
-      retries: 5
-  upgrade:
-    remediation:
-      retries: 5
-  dependsOn:
-    - name: loki
-      namespace: monitoring
-  values:
-    image:
-      repository: timberio/vector
-      tag: 0.25.1-debian
-    role: Stateless-Aggregator
-    replicas: 2
-    customConfig:
-      data_dir: /vector-data-dir
-      api:
-        enabled: false
-      sources:
-        journal_logs:
-          type: vector
-          address: 0.0.0.0:6000
-          version: "2"
-        kubernetes_logs:
-          type: vector
-          address: 0.0.0.0:6010
-          version: "2"
-        opnsense_filterlog_logs:
-          type: syslog
-          address: 0.0.0.0:5140
-          mode: udp
-      transforms:
-        kubernetes_logs_remap:
-          type: remap
-          inputs:
-            - kubernetes_logs
-          source: |
-            # Standardize 'app' index
-            .custom_app_name = .pod_labels."app.kubernetes.io/name" || .pod_labels.app || .pod_labels."k8s-app" || "unknown"
-        opnsense_filterlog_remap:
-          type: remap
-          inputs:
-            - opnsense_filterlog_logs
-          source: |
-            msg = parse_csv!(string!(.message))
-            # Only parse IPv4 / IPv6
-            if msg[8] == "4" || msg[8] == "6" {
-              .filter_interface = msg[4]
-              .filter_direction = msg[7]
-              .filter_action = msg[6]
-              .filter_ip_version = msg[8]
-              .filter_protocol = msg[16]
-              .filter_source_ip = msg[18]
-              .filter_destination_ip = msg[19]
-              if (msg[16] == "icmp" || msg[16] == "igmp" || msg[16] == "gre") {
-                .filter_data = msg[20]
-              } else {
-                .filter_source_port = msg[20]
-                .filter_destination_port = msg[21]
-                .filter_data_length = msg[22]
-                if msg[8] == "4" && msg[16] == "tcp" {
-                  .filter_tcp_flags = msg[23]
-                }
-              }
-            }
-        opnsense_filterlog_route:
-          type: route
-          inputs:
-            - opnsense_filterlog_remap
-          route:
-            pass_action: >-
-              .filter_action == "pass"
-        opnsense_filterlog_geoip:
-          type: geoip
-          inputs:
-            - opnsense_filterlog_route.pass_action
-          database: /geoip/GeoLite2-City.mmdb
-          source: filter_source_ip
-          target: geoip
-      sinks:
-        loki_journal:
-          type: loki
-          inputs:
-            - journal_logs
-          endpoint: http://loki-gateway:80
-          encoding:
-            codec: json
-          batch:
-            max_bytes: 2049000
-          out_of_order_action: accept
-          remove_label_fields: true
-          remove_timestamp: true
-          labels:
-            hostname: >-
-              {{`{{ host }}`}}
-        loki_kubernetes:
-          type: loki
-          inputs:
-            - kubernetes_logs_remap
-          endpoint: http://loki-gateway:80
-          encoding:
-            codec: json
-          batch:
-            max_bytes: 2049000
-          out_of_order_action: accept
-          remove_label_fields: true
-          remove_timestamp: true
-          labels:
-            app: >-
-              {{`{{ custom_app_name }}`}}
-            namespace: >-
-              {{`{{ kubernetes.pod_namespace }}`}}
-            node: >-
-              {{`{{ kubernetes.pod_node_name }}`}}
-        loki_opnsense_filterlog:
-          type: loki
-          inputs:
-            - opnsense_filterlog_route._unmatched
-            - opnsense_filterlog_geoip
-          endpoint: http://loki-gateway:80
-          encoding:
-            codec: json
-          batch:
-            max_bytes: 2049000
-          out_of_order_action: accept
-          labels:
-            hostname: opnsense
-    extraVolumeMounts:
-      - name: geoip
-        mountPath: /geoip
-    extraVolumes:
-      - name: geoip
-        persistentVolumeClaim:
-          claimName: vector-geoipupdate-config
-    service:
-      enabled: true
-      type: LoadBalancer
-    affinity:
-      podAntiAffinity:
-        preferredDuringSchedulingIgnoredDuringExecution:
-          - weight: 100
-            podAffinityTerm:
-              labelSelector:
-                matchExpressions:
-                  - key: app.kubernetes.io/component
-                    operator: In
-                    values: ["Stateless-Aggregator"]
-              topologyKey: kubernetes.io/hostname
-  postRenderers:
-    - kustomize:
-        patchesJson6902:
-          - target:
-              kind: Service
-              name: vector-aggregator
-            patch:
-              - op: add
-                path: /spec/externalIPs
-                value: ["${CLUSTER_LB_SYSLOG}"]
-              - op: replace
-                path: /spec/externalTrafficPolicy
-                value: Local
@@ -1,47 +0,0 @@
----
-apiVersion: helm.toolkit.fluxcd.io/v2beta1
-kind: HelmRelease
-metadata:
-  name: tigera-operator
-  namespace: tigera-operator
-spec:
-  interval: 15m
-  chart:
-    spec:
-      chart: tigera-operator
-      version: v3.24.5
-      sourceRef:
-        kind: HelmRepository
-        name: project-calico-charts
-        namespace: flux-system
-  install:
-    createNamespace: true
-    crds: CreateReplace
-    remediation:
-      retries: 5
-  upgrade:
-    crds: CreateReplace
-    remediation:
-      retries: 5
-  values:
-    installation:
-      enabled: true
-      registry: quay.io
-      imagePath: calico
-      calicoNetwork:
-        bgp: Enabled
-        hostPorts: Disabled
-        # Note: The ipPools section cannot be modified post-install.
-        ipPools:
-          - blockSize: 26
-            cidr: "${NET_POD_CIDR}"
-            encapsulation: None
-            natOutgoing: Enabled
-            nodeSelector: all()
-        linuxDataplane: Iptables
-        multiInterfaceMode: None
-        nodeAddressAutodetectionV4:
-          cidrs:
-            - "${NET_NODE_CIDR}"
-      nodeMetricsPort: 9091
-      typhaMetricsPort: 9093
@@ -1,10 +0,0 @@
----
-apiVersion: source.toolkit.fluxcd.io/v1beta1
-kind: HelmRepository
-metadata:
-  name: jetstack-charts
-  namespace: flux-system
-spec:
-  interval: 1h
-  url: https://charts.jetstack.io/
-  timeout: 3m
@@ -1,26 +0,0 @@
----
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
-  - bitnami-charts.yaml
-  - bjw-s-charts.yaml
-  - cert-manager-webhook-ovh.yaml
-  - cloudnative-pg-charts.yaml
-  - descheduler-charts.yaml
-  - drone-charts.yaml
-  - dysnix-charts.yaml
-  - emxq-charts.yaml
-  - external-dns-charts.yaml
-  - gitea-charts.yaml
-  - grafana-charts.yaml
-  - ingress-nginx-charts.yaml
-  - jetstack-charts.yaml
-  - k8s-gateway-charts.yaml
-  - kyverno-charts.yaml
-  - metrics-server-charts.yaml
-  - node-feature-discovery.yaml
-  - project-calico-charts.yaml
-  - prometheus-community-charts.yaml
-  - rook-ceph-charts.yaml
-  - stakater-charts.yaml
-  - vector-charts.yaml
@@ -1,9 +0,0 @@
----
-apiVersion: source.toolkit.fluxcd.io/v1beta2
-kind: HelmRepository
-metadata:
-  name: project-calico-charts
-  namespace: flux-system
-spec:
-  interval: 1h
-  url: https://projectcalico.docs.tigera.io/charts
@@ -1,67 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: rook-direct-mount
-  namespace: rook-ceph
-  labels:
-    app: rook-direct-mount
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: rook-direct-mount
-  template:
-    metadata:
-      labels:
-        app: rook-direct-mount
-    spec:
-      dnsPolicy: ClusterFirstWithHostNet
-      containers:
-        - name: rook-direct-mount
-          image: rook/ceph:v1.10.5
-          command: ["/usr/local/bin/toolbox.sh"]
-          imagePullPolicy: IfNotPresent
-          env:
-            - name: ROOK_CEPH_USERNAME
-              valueFrom:
-                secretKeyRef:
-                  name: rook-ceph-mon
-                  key: ceph-username
-            - name: ROOK_CEPH_SECRET
-              valueFrom:
-                secretKeyRef:
-                  name: rook-ceph-mon
-                  key: ceph-secret
-          securityContext:
-            privileged: true
-          volumeMounts:
-            - mountPath: /dev
-              name: dev
-            - mountPath: /sys/bus
-              name: sysbus
-            - mountPath: /lib/modules
-              name: libmodules
-            - name: mon-endpoint-volume
-              mountPath: /etc/rook
-      securityContext:
-        runAsUser: 0
-        runAsGroup: 0
-      # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021
-      hostNetwork: true
-      volumes:
-        - name: dev
-          hostPath:
-            path: /dev
-        - name: sysbus
-          hostPath:
-            path: /sys/bus
-        - name: libmodules
-          hostPath:
-            path: /lib/modules
-        - name: mon-endpoint-volume
-          configMap:
-            name: rook-ceph-mon-endpoints
-            items:
-              - key: data
-                path: mon-endpoints
@@ -1,30 +0,0 @@
----
-kind: Deployment
-apiVersion: apps/v1
-metadata:
-  name: snapshot-controller
-  namespace: rook-ceph
-spec:
-  replicas: 2
-  selector:
-    matchLabels:
-      app.kubernetes.io/name: snapshot-controller
-  minReadySeconds: 15
-  strategy:
-    rollingUpdate:
-      maxSurge: 0
-      maxUnavailable: 1
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app.kubernetes.io/name: snapshot-controller
-    spec:
-      serviceAccount: snapshot-controller
-      containers:
-        - name: snapshot-controller
-          image: k8s.gcr.io/sig-storage/snapshot-controller:v6.1.0
-          imagePullPolicy: IfNotPresent
-          args:
-            - "--v=5"
-            - "--leader-election=true"
@@ -1,75 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: snapshot-controller
-  namespace: rook-ceph
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: snapshot-controller-runner
-rules:
-  - apiGroups: [""]
-    resources: ["persistentvolumes"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: [""]
-    resources: ["persistentvolumeclaims"]
-    verbs: ["get", "list", "watch", "update"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["storageclasses"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: [""]
-    resources: ["events"]
-    verbs: ["list", "watch", "create", "update", "patch"]
-  - apiGroups: ["snapshot.storage.k8s.io"]
-    resources: ["volumesnapshotclasses"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["snapshot.storage.k8s.io"]
-    resources: ["volumesnapshotcontents"]
-    verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
-  - apiGroups: ["snapshot.storage.k8s.io"]
-    resources: ["volumesnapshotcontents/status"]
-    verbs: ["patch"]
-  - apiGroups: ["snapshot.storage.k8s.io"]
-    resources: ["volumesnapshots"]
-    verbs: ["get", "list", "watch", "update", "patch"]
-  - apiGroups: ["snapshot.storage.k8s.io"]
-    resources: ["volumesnapshots/status"]
-    verbs: ["update", "patch"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: snapshot-controller-role
-subjects:
-  - kind: ServiceAccount
-    name: snapshot-controller
-    namespace: rook-ceph
-roleRef:
-  kind: ClusterRole
-  name: snapshot-controller-runner
-  apiGroup: rbac.authorization.k8s.io
----
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: snapshot-controller-leaderelection
-  namespace: rook-ceph
-rules:
-  - apiGroups: ["coordination.k8s.io"]
-    resources: ["leases"]
-    verbs: ["get", "watch", "list", "delete", "update", "create"]
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: snapshot-controller-leaderelection
-  namespace: rook-ceph
-subjects:
-  - kind: ServiceAccount
-    name: snapshot-controller
-roleRef:
-  kind: Role
-  name: snapshot-controller-leaderelection
-  apiGroup: rbac.authorization.k8s.io
@@ -1,54 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
  name: kube-prometheus-stack-source
  namespace: flux-system
spec:
  interval: 1h
  url: https://github.com/prometheus-community/helm-charts.git
  ref:
    # renovate: registryUrl=https://prometheus-community.github.io/helm-charts
    tag: kube-prometheus-stack-36.2.0
  ignore: |
    # exclude all
    /*
    # include deploy crds dir
    !/charts/kube-prometheus-stack/crds
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: kube-prometheus-stack-crds
  namespace: flux-system
spec:
  interval: 15m
  prune: false
  sourceRef:
    kind: GitRepository
    name: kube-prometheus-stack-source
  healthChecks:
    - apiVersion: apiextensions.k8s.io/v1
      kind: CustomResourceDefinition
      name: alertmanagerconfigs.monitoring.coreos.com
    - apiVersion: apiextensions.k8s.io/v1
      kind: CustomResourceDefinition
      name: alertmanagers.monitoring.coreos.com
    - apiVersion: apiextensions.k8s.io/v1
      kind: CustomResourceDefinition
      name: podmonitors.monitoring.coreos.com
    - apiVersion: apiextensions.k8s.io/v1
      kind: CustomResourceDefinition
      name: probes.monitoring.coreos.com
    - apiVersion: apiextensions.k8s.io/v1
      kind: CustomResourceDefinition
      name: prometheuses.monitoring.coreos.com
    - apiVersion: apiextensions.k8s.io/v1
      kind: CustomResourceDefinition
      name: prometheusrules.monitoring.coreos.com
    - apiVersion: apiextensions.k8s.io/v1
      kind: CustomResourceDefinition
      name: servicemonitors.monitoring.coreos.com
    - apiVersion: apiextensions.k8s.io/v1
      kind: CustomResourceDefinition
      name: thanosrulers.monitoring.coreos.com
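The `ignore` pattern pulls only the chart's `crds` directory out of the Helm repository, and the Kustomization's `healthChecks` wait for those CRDs to exist. As a quick sanity check once it reconciles, the Prometheus Operator CRDs should all be listed; a minimal sketch:

```sh
# List the CRDs named in the healthChecks block above
kubectl get crds | grep monitoring.coreos.com
```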
@@ -1,26 +0,0 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: apps
  namespace: flux-system
spec:
  interval: 10m0s
  dependsOn:
    - name: core
  path: ./cluster/apps
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-cluster
  decryption:
    provider: sops
    secretRef:
      name: sops-age
  postBuild:
    substitute: {}
    substituteFrom:
      - kind: ConfigMap
        name: cluster-settings
      - kind: Secret
        name: cluster-secrets
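The `decryption` block expects a `sops-age` Secret in `flux-system` holding an age private key. A minimal sketch of creating it; the key file location below is an assumption, not taken from this repository:

```sh
# Create the Secret referenced by spec.decryption.secretRef
# (the age key path is an assumed example location).
kubectl -n flux-system create secret generic sops-age \
  --from-file=age.agekey=$HOME/.config/sops/age/keys.txt
```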
@@ -1,13 +0,0 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: charts
  namespace: flux-system
spec:
  interval: 10m0s
  path: ./cluster/charts
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-cluster
@@ -1,17 +0,0 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: configuration
  namespace: flux-system
spec:
  interval: 10m0s
  path: ./cluster/configuration
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-cluster
  decryption:
    provider: sops
    secretRef:
      name: sops-age
@@ -1,28 +0,0 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: core
  namespace: flux-system
spec:
  interval: 10m0s
  dependsOn:
    - name: charts
    - name: configuration
    - name: crds
  path: ./cluster/core
  prune: false
  sourceRef:
    kind: GitRepository
    name: flux-cluster
  decryption:
    provider: sops
    secretRef:
      name: sops-age
  postBuild:
    substitute: {}
    substituteFrom:
      - kind: ConfigMap
        name: cluster-settings
      - kind: Secret
        name: cluster-secrets
@@ -1,13 +0,0 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: crds
  namespace: flux-system
spec:
  interval: 10m0s
  path: ./cluster/crds
  prune: false
  sourceRef:
    kind: GitRepository
    name: flux-cluster
@@ -1,28 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: GitRepository
metadata:
  name: flux-cluster
  namespace: flux-system
spec:
  interval: 30m
  # https://github.com/k8s-at-home/template-cluster-k3s/issues/324
  url: ssh://git@github.com/auricom/home-ops
  ref:
    branch: main
  secretRef:
    name: github-deploy-key
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: flux-cluster
  namespace: flux-system
spec:
  interval: 30m
  path: ./cluster/flux
  prune: true
  wait: false
  sourceRef:
    kind: GitRepository
    name: flux-cluster
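Together these Kustomizations form the chain crds/charts/configuration → core → apps, all sourced from the `flux-cluster` GitRepository. Once bootstrapped, the chain can be inspected and nudged with the Flux CLI; a short sketch using resource names taken from the manifests above:

```sh
# Show the ready state of every Kustomization in the dependency chain
flux get kustomizations -n flux-system

# Re-fetch the Git source and re-apply a single layer, e.g. core
flux reconcile kustomization core -n flux-system --with-source
```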
Binary file not shown. (Before: 176 KiB)
Binary file not shown. (Before: 115 KiB)
Binary file not shown. (Before: 49 KiB)
Binary file not shown. (Before: 48 KiB)
49
docs/flux.md
49
docs/flux.md
@@ -1,49 +0,0 @@
# Flux

## Install the CLI tool

```sh
brew install fluxcd/tap/flux
```

## Install the cluster components

_For the full installation guide, visit the [Flux installation guide](https://toolkit.fluxcd.io/guides/installation/)_

Check if your cluster is ready for Flux:

```sh
flux check --pre
```

Install Flux into your cluster:

```sh
flux bootstrap github \
  --owner=auricom \
  --repository=home-ops \
  --path=cluster/base \
  --personal \
  --private=false \
  --network-policy=false
```

## Useful commands

Force Flux to sync your repository:

```sh
flux reconcile source git flux-system
```

Force Flux to sync a Helm release:

```sh
flux reconcile helmrelease sonarr -n default
```

Force Flux to sync a Helm repository:

```sh
flux reconcile source helm ingress-nginx-charts -n flux-system
```
@@ -1,9 +0,0 @@
# post-install

In `/etc/rancher/k3s/`, add:

```yaml
kubelet-arg: ['cluster-dns=169.254.20.10']
```
27
docs/pxe.md
27
docs/pxe.md
@@ -1,27 +0,0 @@
# Opnsense | PXE

## Setting up TFTP

- Set up TFTP and network booting on the DHCPv4 server
- Create an nginx location serving the file system `/var/lib/tftpboot`
- Create an nginx HTTP server listening on TCP port 30080
- Enable `dnsmasq` in the Opnsense services settings (set port to `63`)
- Copy over `pxe.conf` to `/usr/local/etc/dnsmasq.conf.d/pxe.conf`
- SSH into Opnsense and run the following commands:

```console
$ mkdir -p /var/lib/tftpboot/pxelinux/
$ curl https://releases.ubuntu.com/20.04/ubuntu-20.04.4-live-server-amd64.iso -o /var/lib/tftpboot/ubuntu-20.04.4-live-server-amd64.iso
$ mount -t cd9660 /dev/`mdconfig -f /var/lib/tftpboot/ubuntu-20.04.4-live-server-amd64.iso` /mnt
$ cp /mnt/casper/vmlinuz /var/lib/tftpboot/pxelinux/
$ cp /mnt/casper/initrd /var/lib/tftpboot/pxelinux/
$ umount /mnt
$ curl http://archive.ubuntu.com/ubuntu/dists/focal/main/uefi/grub2-amd64/current/grubnetx64.efi.signed -o /var/lib/tftpboot/pxelinux/pxelinux.0
```

- Copy `grub/grub.conf` into `/var/lib/tftpboot/grub/grub.conf`
- Copy `nodes/` into `/var/lib/tftpboot/nodes` (a quick serving check is sketched below)
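Once the boot files are in place, the TFTP and HTTP endpoints can be spot-checked from another machine on the LAN. A minimal sketch, assuming the Opnsense box answers on `opnsense.lan` and the nginx location maps directly onto `/var/lib/tftpboot` (both hostname and URL paths are assumptions), and that curl was built with TFTP support:

```sh
# Fetch the bootloader over TFTP (curl speaks tftp:// when built with it)
curl -o /dev/null tftp://opnsense.lan/pxelinux/pxelinux.0

# Fetch the kernel over the nginx HTTP server on port 30080
curl -o /dev/null http://opnsense.lan:30080/pxelinux/vmlinuz
```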
## PXE boot on bare-metal servers

Press the F12 key for 15-20 seconds during startup to enter the PXE IPv4 boot option.
Some files were not shown because too many files have changed in this diff.