From 00f522121088628bba801f36dc85655a61aca936 Mon Sep 17 00:00:00 2001 From: auricom Date: Fri, 9 Apr 2021 11:35:18 +0200 Subject: [PATCH] Add ansible playbooks --- .github/yamllint.config.yaml | 3 +- server/README.md | 16 + server/ansible/ansible.cfg | 53 +++ .../group_vars/all/calico-settings.yml | 15 + .../group_vars/all/k3s-settings.yml | 23 ++ .../group_vars/all/registry-settings.yml | 11 + .../group_vars/all/rsyslog-settings.yml | 7 + .../group_vars/all/ubuntu-settings.yml | 23 ++ .../group_vars/server-nodes/k3s-settings.yml | 26 ++ .../group_vars/worker-nodes/k3s-settings.yml | 10 + .../home-cluster/host_vars/k3s-server.yml | 9 + .../home-cluster/host_vars/k3s-worker1.yml | 9 + .../home-cluster/host_vars/k3s-worker2.yml | 9 + .../home-cluster/host_vars/k3s-worker3.yml | 10 + .../ansible/inventory/home-cluster/hosts.yml | 17 + server/ansible/playbooks/k3s/install.yml | 14 + server/ansible/playbooks/k3s/nuke.yml | 32 ++ server/ansible/playbooks/k3s/upgrade.yml | 13 + .../playbooks/power-outage/shutdown.yml | 36 ++ server/ansible/playbooks/ubuntu/prepare.yml | 13 + server/ansible/playbooks/ubuntu/upgrade.yml | 22 ++ server/ansible/requirements.txt | 1 + server/ansible/requirements.yml | 6 + server/ansible/roles/k3s/tasks/calico.yml | 38 ++ server/ansible/roles/k3s/tasks/kubeconfig.yml | 20 + server/ansible/roles/k3s/tasks/main.yml | 21 ++ server/ansible/roles/k3s/tasks/registry.yml | 21 ++ .../templates/calico-bgpconfiguration.yaml.j2 | 8 + .../k3s/templates/calico-bgppeer.yaml.j2 | 8 + .../k3s/templates/calico-installation.yaml.j2 | 19 + .../roles/k3s/templates/registries.yaml.j2 | 20 + server/ansible/roles/ubuntu/tasks/boot.yml | 43 +++ server/ansible/roles/ubuntu/tasks/disks.yml | 19 + .../ansible/roles/ubuntu/tasks/filesystem.yml | 20 + server/ansible/roles/ubuntu/tasks/host.yml | 6 + server/ansible/roles/ubuntu/tasks/kernel.yml | 25 ++ server/ansible/roles/ubuntu/tasks/locale.yml | 44 +++ server/ansible/roles/ubuntu/tasks/main.yml | 51 +++ server/ansible/roles/ubuntu/tasks/network.yml | 23 ++ .../ansible/roles/ubuntu/tasks/packages.yml | 94 +++++ .../roles/ubuntu/tasks/power-button.yml | 15 + server/ansible/roles/ubuntu/tasks/rsyslog.yml | 19 + .../ubuntu/tasks/unattended-upgrades.yml | 37 ++ server/ansible/roles/ubuntu/tasks/user.yml | 35 ++ .../templates/rsyslog-50-promtail.conf.j2 | 4 + server/ansible/roles/ubuntu/vars/main.yml | 69 ++++ .../roles/xanmanning.k3s/.ansible-lint | 4 + .../.github/ISSUE_TEMPLATE/bug_report.md | 55 +++ .../.github/ISSUE_TEMPLATE/config.yml | 3 + .../.github/ISSUE_TEMPLATE/feature_request.md | 33 ++ .../.github/PULL_REQUEST_TEMPLATE.md | 37 ++ .../roles/xanmanning.k3s/.github/stale.yml | 18 + .../xanmanning.k3s/.github/workflows/ci.yml | 65 ++++ .../.github/workflows/release.yml | 32 ++ .../ansible/roles/xanmanning.k3s/.gitignore | 12 + server/ansible/roles/xanmanning.k3s/.yamllint | 33 ++ .../ansible/roles/xanmanning.k3s/CHANGELOG.md | 304 ++++++++++++++++ .../roles/xanmanning.k3s/CONTRIBUTING.md | 46 +++ .../ansible/roles/xanmanning.k3s/LICENSE.txt | 30 ++ server/ansible/roles/xanmanning.k3s/README.md | 344 ++++++++++++++++++ .../roles/xanmanning.k3s/defaults/main.yml | 99 +++++ .../xanmanning.k3s/documentation/README.md | 43 +++ .../configuration/2-node-ha-ext-datastore.md | 79 ++++ .../multiple-standalone-k3s-nodes.md | 71 ++++ .../node-labels-and-component-args.md | 39 ++ .../configuration/systemd-config.md | 19 + .../configuration/use-an-alternate-cni.md | 63 ++++ .../operations/extending-a-cluster.md | 69 ++++ 
.../operations/shrinking-a-cluster.md | 74 ++++ .../operations/stop-start-cluster.md | 93 +++++ .../documentation/operations/updating-k3s.md | 52 +++ .../documentation/quickstart-cluster.md | 147 ++++++++ .../documentation/quickstart-ha-cluster.md | 154 ++++++++ .../documentation/quickstart-single-node.md | 121 ++++++ .../roles/xanmanning.k3s/handlers/main.yml | 32 ++ .../xanmanning.k3s/meta/.galaxy_install_info | 2 + .../roles/xanmanning.k3s/meta/main.yml | 83 +++++ .../molecule/autodeploy/converge.yml | 16 + .../molecule/autodeploy/molecule.yml | 44 +++ .../molecule/autodeploy/prepare.yml | 9 + .../templates/00-ns-monitoring.yml.j2 | 4 + .../molecule/autodeploy/verify.yml | 9 + .../molecule/debug/converge.yml | 12 + .../molecule/debug/molecule.yml | 44 +++ .../xanmanning.k3s/molecule/debug/prepare.yml | 8 + .../xanmanning.k3s/molecule/debug/verify.yml | 9 + .../molecule/default/Dockerfile.j2 | 26 ++ .../molecule/default/INSTALL.rst | 22 ++ .../molecule/default/converge.yml | 9 + .../molecule/default/molecule.yml | 44 +++ .../molecule/default/playbook-download.yml | 9 + .../default/playbook-restart-cluster.yml | 9 + .../molecule/default/playbook-rootless.yml | 15 + .../molecule/default/playbook-standalone.yml | 9 + .../default/playbook-start-cluster.yml | 9 + .../default/playbook-stop-cluster.yml | 9 + .../default/playbook-uninstall-cluster.yml | 9 + .../molecule/default/prepare-rootless.yml | 23 ++ .../molecule/default/prepare.yml | 8 + .../molecule/default/tests/test_default.py | 14 + .../molecule/default/tests/test_default.pyc | Bin 0 -> 1025 bytes .../molecule/docker/converge.yml | 13 + .../molecule/docker/molecule.yml | 44 +++ .../molecule/docker/prepare.yml | 8 + .../molecule/highavailabilitydb/Dockerfile.j2 | 7 + .../molecule/highavailabilitydb/INSTALL.rst | 22 ++ .../molecule/highavailabilitydb/converge.yml | 17 + .../haproxy-loadbalancer.conf.j2 | 13 + .../molecule/highavailabilitydb/molecule.yml | 57 +++ .../molecule/highavailabilitydb/prepare.yml | 38 ++ .../highavailabilitydb/tests/test_default.py | 14 + .../highavailabilitydb/tests/test_default.pyc | Bin 0 -> 1025 bytes .../highavailabilityetcd/converge.yml | 17 + .../haproxy-loadbalancer.conf.j2 | 13 + .../highavailabilityetcd/molecule.yml | 49 +++ .../molecule/highavailabilityetcd/prepare.yml | 38 ++ .../molecule/nodeploy/converge.yml | 10 + .../molecule/nodeploy/k3s_agent.yml | 8 + .../molecule/nodeploy/k3s_server.yml | 14 + .../molecule/nodeploy/molecule.yml | 44 +++ .../molecule/nodeploy/prepare.yml | 8 + .../molecule/nodeploy/verify.yml | 9 + .../xanmanning.k3s/molecule/requirements.txt | 6 + .../roles/xanmanning.k3s/requirements.txt | 1 + .../tasks/build/configure-k3s-cluster.yml | 86 +++++ .../tasks/build/docker/amazon/install.yml | 12 + .../archlinux/install-prerequisites.yml | 1 + .../tasks/build/docker/archlinux/install.yml | 16 + .../docker/debian/install-prerequisites.yml | 29 ++ .../tasks/build/docker/install.yml | 16 + .../build/docker/opensuse-leap/install.yml | 16 + .../docker/redhat/install-prerequisites.yml | 55 +++ .../docker/suse/install-prerequisites.yml | 1 + .../tasks/build/docker/suse/install.yml | 16 + .../tasks/build/download-k3s.yml | 51 +++ .../tasks/build/get-systemd-context.yml | 10 + .../tasks/build/get-version.yml | 32 ++ .../tasks/build/install-k3s-directories.yml | 12 + .../tasks/build/install-k3s-node.yml | 58 +++ .../tasks/build/install-k3s.yml | 32 ++ ...configure-k3s-auto-deploying-manifests.yml | 18 + .../tasks/build/preconfigure-k3s.yml | 125 +++++++ 
.../roles/xanmanning.k3s/tasks/main.yml | 5 + .../tasks/operate/start-k3s.yml | 20 + .../xanmanning.k3s/tasks/operate/stop-k3s.yml | 20 + .../xanmanning.k3s/tasks/state-downloaded.yml | 6 + .../xanmanning.k3s/tasks/state-installed.yml | 53 +++ .../xanmanning.k3s/tasks/state-restarted.yml | 5 + .../xanmanning.k3s/tasks/state-started.yml | 3 + .../xanmanning.k3s/tasks/state-stopped.yml | 3 + .../tasks/state-uninstalled.yml | 25 ++ .../xanmanning.k3s/tasks/state-validated.yml | 7 + .../teardown/docker/amazon/uninstall.yml | 7 + .../teardown/docker/archlinux/uninstall.yml | 11 + .../docker/debian/uninstall-prerequisites.yml | 15 + .../docker/opensuse-leap/uninstall.yml | 11 + .../docker/redhat/uninstall-prerequisites.yml | 13 + .../docker/suse/uninstall-prerequisites.yml | 1 + .../tasks/teardown/docker/suse/uninstall.yml | 11 + .../tasks/teardown/docker/uninstall.yml | 14 + .../tasks/teardown/drain-and-remove-nodes.yml | 51 +++ .../tasks/teardown/uninstall-k3s.yml | 52 +++ .../validate/configuration/cluster-init.yml | 19 + .../configuration/control-node-count.yml | 45 +++ .../configuration/experimental-variables.yml | 31 ++ .../configuration/unsupported-rootless.yml | 62 ++++ .../validate/configuration/variables.yml | 55 +++ .../validate/environment/local/issue-data.yml | 82 +++++ .../validate/environment/local/packages.yml | 13 + .../validate/environment/remote/packages.yml | 23 ++ .../xanmanning.k3s/tasks/validate/main.yml | 15 + .../tasks/validate/post-install.yml | 5 + .../tasks/validate/pre-flight.yml | 27 ++ .../tasks/validate/state/control-plane.yml | 10 + .../tasks/validate/state/nodes.yml | 19 + .../tasks/validate/state/uninstalled.yml | 51 +++ .../xanmanning.k3s/templates/cluster-token.j2 | 1 + .../xanmanning.k3s/templates/config.yaml.j2 | 8 + .../templates/k3s-killall.sh.j2 | 69 ++++ .../templates/k3s-uninstall.sh.j2 | 50 +++ .../xanmanning.k3s/templates/k3s.service.j2 | 60 +++ .../roles/xanmanning.k3s/vars/main.yml | 145 ++++++++ 182 files changed, 5820 insertions(+), 2 deletions(-) create mode 100644 server/README.md create mode 100644 server/ansible/ansible.cfg create mode 100644 server/ansible/inventory/home-cluster/group_vars/all/calico-settings.yml create mode 100644 server/ansible/inventory/home-cluster/group_vars/all/k3s-settings.yml create mode 100644 server/ansible/inventory/home-cluster/group_vars/all/registry-settings.yml create mode 100644 server/ansible/inventory/home-cluster/group_vars/all/rsyslog-settings.yml create mode 100644 server/ansible/inventory/home-cluster/group_vars/all/ubuntu-settings.yml create mode 100644 server/ansible/inventory/home-cluster/group_vars/server-nodes/k3s-settings.yml create mode 100644 server/ansible/inventory/home-cluster/group_vars/worker-nodes/k3s-settings.yml create mode 100644 server/ansible/inventory/home-cluster/host_vars/k3s-server.yml create mode 100644 server/ansible/inventory/home-cluster/host_vars/k3s-worker1.yml create mode 100644 server/ansible/inventory/home-cluster/host_vars/k3s-worker2.yml create mode 100644 server/ansible/inventory/home-cluster/host_vars/k3s-worker3.yml create mode 100644 server/ansible/inventory/home-cluster/hosts.yml create mode 100644 server/ansible/playbooks/k3s/install.yml create mode 100644 server/ansible/playbooks/k3s/nuke.yml create mode 100644 server/ansible/playbooks/k3s/upgrade.yml create mode 100644 server/ansible/playbooks/power-outage/shutdown.yml create mode 100644 server/ansible/playbooks/ubuntu/prepare.yml create mode 100644 server/ansible/playbooks/ubuntu/upgrade.yml create mode 100644 
server/ansible/requirements.txt create mode 100644 server/ansible/requirements.yml create mode 100644 server/ansible/roles/k3s/tasks/calico.yml create mode 100644 server/ansible/roles/k3s/tasks/kubeconfig.yml create mode 100644 server/ansible/roles/k3s/tasks/main.yml create mode 100644 server/ansible/roles/k3s/tasks/registry.yml create mode 100644 server/ansible/roles/k3s/templates/calico-bgpconfiguration.yaml.j2 create mode 100644 server/ansible/roles/k3s/templates/calico-bgppeer.yaml.j2 create mode 100644 server/ansible/roles/k3s/templates/calico-installation.yaml.j2 create mode 100644 server/ansible/roles/k3s/templates/registries.yaml.j2 create mode 100644 server/ansible/roles/ubuntu/tasks/boot.yml create mode 100644 server/ansible/roles/ubuntu/tasks/disks.yml create mode 100644 server/ansible/roles/ubuntu/tasks/filesystem.yml create mode 100644 server/ansible/roles/ubuntu/tasks/host.yml create mode 100644 server/ansible/roles/ubuntu/tasks/kernel.yml create mode 100644 server/ansible/roles/ubuntu/tasks/locale.yml create mode 100644 server/ansible/roles/ubuntu/tasks/main.yml create mode 100644 server/ansible/roles/ubuntu/tasks/network.yml create mode 100644 server/ansible/roles/ubuntu/tasks/packages.yml create mode 100644 server/ansible/roles/ubuntu/tasks/power-button.yml create mode 100644 server/ansible/roles/ubuntu/tasks/rsyslog.yml create mode 100644 server/ansible/roles/ubuntu/tasks/unattended-upgrades.yml create mode 100644 server/ansible/roles/ubuntu/tasks/user.yml create mode 100644 server/ansible/roles/ubuntu/templates/rsyslog-50-promtail.conf.j2 create mode 100644 server/ansible/roles/ubuntu/vars/main.yml create mode 100644 server/ansible/roles/xanmanning.k3s/.ansible-lint create mode 100644 server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/bug_report.md create mode 100644 server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/config.yml create mode 100644 server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/feature_request.md create mode 100644 server/ansible/roles/xanmanning.k3s/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 server/ansible/roles/xanmanning.k3s/.github/stale.yml create mode 100644 server/ansible/roles/xanmanning.k3s/.github/workflows/ci.yml create mode 100644 server/ansible/roles/xanmanning.k3s/.github/workflows/release.yml create mode 100644 server/ansible/roles/xanmanning.k3s/.gitignore create mode 100644 server/ansible/roles/xanmanning.k3s/.yamllint create mode 100644 server/ansible/roles/xanmanning.k3s/CHANGELOG.md create mode 100644 server/ansible/roles/xanmanning.k3s/CONTRIBUTING.md create mode 100644 server/ansible/roles/xanmanning.k3s/LICENSE.txt create mode 100644 server/ansible/roles/xanmanning.k3s/README.md create mode 100644 server/ansible/roles/xanmanning.k3s/defaults/main.yml create mode 100644 server/ansible/roles/xanmanning.k3s/documentation/README.md create mode 100644 server/ansible/roles/xanmanning.k3s/documentation/configuration/2-node-ha-ext-datastore.md create mode 100644 server/ansible/roles/xanmanning.k3s/documentation/configuration/multiple-standalone-k3s-nodes.md create mode 100644 server/ansible/roles/xanmanning.k3s/documentation/configuration/node-labels-and-component-args.md create mode 100644 server/ansible/roles/xanmanning.k3s/documentation/configuration/systemd-config.md create mode 100644 server/ansible/roles/xanmanning.k3s/documentation/configuration/use-an-alternate-cni.md create mode 100644 server/ansible/roles/xanmanning.k3s/documentation/operations/extending-a-cluster.md create mode 100644 
server/ansible/roles/xanmanning.k3s/documentation/operations/shrinking-a-cluster.md create mode 100644 server/ansible/roles/xanmanning.k3s/documentation/operations/stop-start-cluster.md create mode 100644 server/ansible/roles/xanmanning.k3s/documentation/operations/updating-k3s.md create mode 100644 server/ansible/roles/xanmanning.k3s/documentation/quickstart-cluster.md create mode 100644 server/ansible/roles/xanmanning.k3s/documentation/quickstart-ha-cluster.md create mode 100644 server/ansible/roles/xanmanning.k3s/documentation/quickstart-single-node.md create mode 100644 server/ansible/roles/xanmanning.k3s/handlers/main.yml create mode 100644 server/ansible/roles/xanmanning.k3s/meta/.galaxy_install_info create mode 100644 server/ansible/roles/xanmanning.k3s/meta/main.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/autodeploy/converge.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/autodeploy/molecule.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/autodeploy/prepare.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/autodeploy/templates/00-ns-monitoring.yml.j2 create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/autodeploy/verify.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/debug/converge.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/debug/molecule.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/debug/prepare.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/debug/verify.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/Dockerfile.j2 create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/INSTALL.rst create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/converge.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/molecule.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/playbook-download.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/playbook-restart-cluster.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/playbook-rootless.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/playbook-standalone.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/playbook-start-cluster.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/playbook-stop-cluster.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/playbook-uninstall-cluster.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/prepare-rootless.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/prepare.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/tests/test_default.py create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/default/tests/test_default.pyc create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/docker/converge.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/docker/molecule.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/docker/prepare.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/Dockerfile.j2 create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/INSTALL.rst create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/converge.yml create mode 100644 
server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/haproxy-loadbalancer.conf.j2 create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/molecule.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/prepare.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/tests/test_default.py create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/tests/test_default.pyc create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/converge.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/haproxy-loadbalancer.conf.j2 create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/molecule.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/prepare.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/nodeploy/converge.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/nodeploy/k3s_agent.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/nodeploy/k3s_server.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/nodeploy/molecule.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/nodeploy/prepare.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/nodeploy/verify.yml create mode 100644 server/ansible/roles/xanmanning.k3s/molecule/requirements.txt create mode 100644 server/ansible/roles/xanmanning.k3s/requirements.txt create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/configure-k3s-cluster.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/docker/amazon/install.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/docker/archlinux/install-prerequisites.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/docker/archlinux/install.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/docker/debian/install-prerequisites.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/docker/install.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/docker/opensuse-leap/install.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/docker/redhat/install-prerequisites.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/docker/suse/install-prerequisites.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/docker/suse/install.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/download-k3s.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/get-systemd-context.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/get-version.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/install-k3s-directories.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/install-k3s-node.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/install-k3s.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/preconfigure-k3s-auto-deploying-manifests.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/build/preconfigure-k3s.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/main.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/operate/start-k3s.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/operate/stop-k3s.yml create mode 100644 
server/ansible/roles/xanmanning.k3s/tasks/state-downloaded.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/state-installed.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/state-restarted.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/state-started.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/state-stopped.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/state-uninstalled.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/state-validated.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/amazon/uninstall.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/archlinux/uninstall.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/debian/uninstall-prerequisites.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/opensuse-leap/uninstall.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/redhat/uninstall-prerequisites.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/suse/uninstall-prerequisites.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/suse/uninstall.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/uninstall.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/teardown/drain-and-remove-nodes.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/teardown/uninstall-k3s.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/cluster-init.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/control-node-count.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/experimental-variables.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/unsupported-rootless.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/variables.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/validate/environment/local/issue-data.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/validate/environment/local/packages.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/validate/environment/remote/packages.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/validate/main.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/validate/post-install.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/validate/pre-flight.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/validate/state/control-plane.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/validate/state/nodes.yml create mode 100644 server/ansible/roles/xanmanning.k3s/tasks/validate/state/uninstalled.yml create mode 100644 server/ansible/roles/xanmanning.k3s/templates/cluster-token.j2 create mode 100644 server/ansible/roles/xanmanning.k3s/templates/config.yaml.j2 create mode 100644 server/ansible/roles/xanmanning.k3s/templates/k3s-killall.sh.j2 create mode 100644 server/ansible/roles/xanmanning.k3s/templates/k3s-uninstall.sh.j2 create mode 100644 server/ansible/roles/xanmanning.k3s/templates/k3s.service.j2 create mode 100644 server/ansible/roles/xanmanning.k3s/vars/main.yml diff --git a/.github/yamllint.config.yaml b/.github/yamllint.config.yaml index c07deb3a1..1d18a25fd 100644 --- a/.github/yamllint.config.yaml +++ 
b/.github/yamllint.config.yaml @@ -1,8 +1,7 @@ ignore: | .yamllint.yml .github/ - ansible/ - integrations/ + server/ansible ignore/ secrets/ *-crds.yaml diff --git a/server/README.md b/server/README.md new file mode 100644 index 000000000..053eac9b8 --- /dev/null +++ b/server/README.md @@ -0,0 +1,16 @@ +# Server infrastructure + +These Ansible playbooks and roles prepare an Ubuntu 20.10.x OS to play nicely with Kubernetes and stand up k3s on top of the nodes. + +## Commands + +Commands to run can be found in my Ansible Taskfile, located [here](https://github.com/onedr0p/home-cluster/blob/main/.taskfiles/ansible.yml). + +e.g. + +```bash +# List hosts in my Ansible inventory +task ansible:list +# Ping hosts in my Ansible inventory +task ansible:ping +``` diff --git a/server/ansible/ansible.cfg b/server/ansible/ansible.cfg new file mode 100644 index 000000000..c21ac7e7d --- /dev/null +++ b/server/ansible/ansible.cfg @@ -0,0 +1,53 @@ +[defaults] + +#--- General settings +nocows = True +forks = 8 +module_name = command +deprecation_warnings = True +executable = /bin/bash + +#--- Files/Directory settings +log_path = ~/ansible.log +inventory = ./inventory +library = /usr/share/my_modules +remote_tmp = ~/.ansible/tmp +local_tmp = ~/.ansible/tmp +roles_path = ./roles +retry_files_enabled = False + +#--- Fact Caching settings +fact_caching = jsonfile +fact_caching_connection = ~/.ansible/facts_cache +fact_caching_timeout = 7200 + +#--- SSH settings +remote_port = 22 +timeout = 60 +host_key_checking = False +ssh_executable = /usr/bin/ssh +private_key_file = ~/.ssh/id_rsa + +force_valid_group_names = ignore + +#--- Speed +callback_whitelist = ansible.posix.profile_tasks +internal_poll_interval = 0.001 + +[inventory] +unparsed_is_failed = true + +[privilege_escalation] +become = True +become_method = sudo +become_user = root +become_ask_pass = False + +[ssh_connection] +scp_if_ssh = smart +transfer_method = smart +retries = 3 +timeout = 10 +ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o Compression=yes -o ServerAliveInterval=15 +pipelining = True +control_path = %(directory)s/%%h-%%r diff --git a/server/ansible/inventory/home-cluster/group_vars/all/calico-settings.yml b/server/ansible/inventory/home-cluster/group_vars/all/calico-settings.yml new file mode 100644 index 000000000..a530c61f1 --- /dev/null +++ b/server/ansible/inventory/home-cluster/group_vars/all/calico-settings.yml @@ -0,0 +1,15 @@ +--- +# Use Calico CNI driver +calico: + enabled: true + operator_manifest: "https://docs.projectcalico.org/manifests/tigera-operator.yaml" + # Enabling BGP requires your router to be set up to handle it + bgp: + enabled: true + # peer is usually your router, e.g. 192.168.1.1 + peer: 192.168.8.1 + as: 64512 + # externalIPs is the network you want services to consume external IPs from + # this network should not already exist or be defined anywhere else in your network + # e.g. 192.168.169.0/24 + externalIPs: 192.168.169.0/24 diff --git a/server/ansible/inventory/home-cluster/group_vars/all/k3s-settings.yml b/server/ansible/inventory/home-cluster/group_vars/all/k3s-settings.yml new file mode 100644 index 000000000..af3b48c3d --- /dev/null +++ b/server/ansible/inventory/home-cluster/group_vars/all/k3s-settings.yml @@ -0,0 +1,23 @@ +--- +# +# Below vars are for the xanmanning.k3s role +# ...see https://github.com/PyratLabs/ansible-role-k3s#globalcluster-variables +# + +# Use a specific version of k3s +k3s_release_version: "v1.20.5+k3s1" + +# Install using hard links rather than symbolic links.
+# ...required if you are using the system-upgrade-controller, which cannot follow symbolic links. +k3s_install_hard_links: true + +# Escalate user privileges for all tasks. +k3s_become_for_all: true + +# Use experimental features (spooky!) +k3s_use_experimental: false + +# Enable debugging +k3s_debug: false +# # Enable embedded-etcd +# k3s_etcd_datastore: true diff --git a/server/ansible/inventory/home-cluster/group_vars/all/registry-settings.yml b/server/ansible/inventory/home-cluster/group_vars/all/registry-settings.yml new file mode 100644 index 000000000..82cfde924 --- /dev/null +++ b/server/ansible/inventory/home-cluster/group_vars/all/registry-settings.yml @@ -0,0 +1,11 @@ +--- + +# Configure a registry mirror, useful for having a pull-through cache +mirror_registry: + address: "https://registry-cache.devbu.io" + +# Configure private registries +private_registries: +- address: "https://registry.devbu.io" + username: "admin" + password: "password" diff --git a/server/ansible/inventory/home-cluster/group_vars/all/rsyslog-settings.yml b/server/ansible/inventory/home-cluster/group_vars/all/rsyslog-settings.yml new file mode 100644 index 000000000..4af395168 --- /dev/null +++ b/server/ansible/inventory/home-cluster/group_vars/all/rsyslog-settings.yml @@ -0,0 +1,7 @@ +--- +# Enable rsyslog +# ...requires an rsyslog server that is already set up +rsyslog: + enabled: false + ip: 192.168.69.155 + port: 1514 diff --git a/server/ansible/inventory/home-cluster/group_vars/all/ubuntu-settings.yml b/server/ansible/inventory/home-cluster/group_vars/all/ubuntu-settings.yml new file mode 100644 index 000000000..074e80c24 --- /dev/null +++ b/server/ansible/inventory/home-cluster/group_vars/all/ubuntu-settings.yml @@ -0,0 +1,23 @@ +--- +# Enable to skip the apt upgrade +skip_upgrade_packages: false +# Enable to skip removing crufty packages +skip_remove_packages: false + +# Timezone for the servers +timezone: "Europe/Paris" + +# # Set custom ntp servers +# ntp_servers: +# primary: +# - "time.cloudflare.com" +# - "time.google.com" +# fallback: +# - "0.us.pool.ntp.org" +# - "1.us.pool.ntp.org" +# - "2.us.pool.ntp.org" +# - "3.us.pool.ntp.org" + +# Additional ssh public keys to add to the nodes +ssh_authorized_keys: + - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL+GMHgvbtf6f7xUMAQR+vZFfD/mIIfIDNX5iP8tDRXZ claude@claude-thinkpad-fedora" diff --git a/server/ansible/inventory/home-cluster/group_vars/server-nodes/k3s-settings.yml b/server/ansible/inventory/home-cluster/group_vars/server-nodes/k3s-settings.yml new file mode 100644 index 000000000..942a5b97b --- /dev/null +++ b/server/ansible/inventory/home-cluster/group_vars/server-nodes/k3s-settings.yml @@ -0,0 +1,26 @@ +--- + +# https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/ +# https://github.com/PyratLabs/ansible-role-k3s#server-control-plane-configuration + +# Define these hosts as control plane nodes +k3s_control_node: true + +# k3s settings for all control-plane nodes +k3s_server: + node-ip: "{{ ansible_host }}" + docker: false + flannel-backend: 'none' # This needs to be in quotes + disable: + - flannel + - traefik + - servicelb + - metrics-server + - local-storage + disable-network-policy: true + disable-cloud-controller: true + write-kubeconfig-mode: "644" + # Network CIDR to use for pod IPs + cluster-cidr: "10.69.0.0/16" + # Network CIDR to use for service IPs + service-cidr: "10.96.0.0/16" diff --git
a/server/ansible/inventory/home-cluster/group_vars/worker-nodes/k3s-settings.yml b/server/ansible/inventory/home-cluster/group_vars/worker-nodes/k3s-settings.yml new file mode 100644 index 000000000..045f66447 --- /dev/null +++ b/server/ansible/inventory/home-cluster/group_vars/worker-nodes/k3s-settings.yml @@ -0,0 +1,10 @@ +--- +# https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/ +# https://github.com/PyratLabs/ansible-role-k3s#agent-worker-configuration + +# Don't define these hosts as control plane nodes +k3s_control_node: false + +# k3s settings for all worker nodes +k3s_agent: + node-ip: "{{ ansible_host }}" diff --git a/server/ansible/inventory/home-cluster/host_vars/k3s-server.yml b/server/ansible/inventory/home-cluster/host_vars/k3s-server.yml new file mode 100644 index 000000000..cc0160f10 --- /dev/null +++ b/server/ansible/inventory/home-cluster/host_vars/k3s-server.yml @@ -0,0 +1,9 @@ +--- +# IP address of node +ansible_host: "192.168.9.100" + +# Ansible user to ssh into servers with +ansible_user: "ubuntu" +# ansible_ssh_pass: "ubuntu" +# ansible_ssh_common_args: "-o UserKnownHostsFile=/dev/null" +# ansible_become_pass: "ubuntu" diff --git a/server/ansible/inventory/home-cluster/host_vars/k3s-worker1.yml b/server/ansible/inventory/home-cluster/host_vars/k3s-worker1.yml new file mode 100644 index 000000000..ce00b6451 --- /dev/null +++ b/server/ansible/inventory/home-cluster/host_vars/k3s-worker1.yml @@ -0,0 +1,9 @@ +--- +# IP address of node +ansible_host: "192.168.9.105" + +# Ansible user to ssh into servers with +ansible_user: "ubuntu" +# ansible_ssh_pass: "ubuntu" +# ansible_ssh_common_args: "-o UserKnownHostsFile=/dev/null" +# ansible_become_pass: "ubuntu" diff --git a/server/ansible/inventory/home-cluster/host_vars/k3s-worker2.yml b/server/ansible/inventory/home-cluster/host_vars/k3s-worker2.yml new file mode 100644 index 000000000..88e57bab5 --- /dev/null +++ b/server/ansible/inventory/home-cluster/host_vars/k3s-worker2.yml @@ -0,0 +1,9 @@ +--- +# IP address of node +ansible_host: "192.168.9.106" + +# Ansible user to ssh into servers with +ansible_user: "ubuntu" +# ansible_ssh_pass: "ubuntu" +# ansible_ssh_common_args: "-o UserKnownHostsFile=/dev/null" +# ansible_become_pass: "ubuntu" diff --git a/server/ansible/inventory/home-cluster/host_vars/k3s-worker3.yml b/server/ansible/inventory/home-cluster/host_vars/k3s-worker3.yml new file mode 100644 index 000000000..0c463e9ca --- /dev/null +++ b/server/ansible/inventory/home-cluster/host_vars/k3s-worker3.yml @@ -0,0 +1,10 @@ +--- +# IP address of node +ansible_host: "192.168.9.107" + +# Ansible user to ssh into servers with +ansible_user: "ubuntu" +# ansible_ssh_pass: "ubuntu" +# ansible_ssh_common_args: "-o UserKnownHostsFile=/dev/null" +# ansible_become_pass: "ubuntu" +disks: "" diff --git a/server/ansible/inventory/home-cluster/hosts.yml b/server/ansible/inventory/home-cluster/hosts.yml new file mode 100644 index 000000000..dd17d799e --- /dev/null +++ b/server/ansible/inventory/home-cluster/hosts.yml @@ -0,0 +1,17 @@ +--- +all: + children: + # Control plane group, do not change the 'server-nodes' name + # hosts should match the filenames in 'host_vars' + server-nodes: + hosts: + k3s-server: + # Worker group, do not change the 'worker-nodes' name + # hosts should match the filenames in 'host_vars' + worker-nodes: + hosts: + k3s-worker1: + k3s-worker2: + k3s-worker3: + # Storage group, these are my NAS devices + # hosts should match the filenames in 'host_vars' diff --git
a/server/ansible/playbooks/k3s/install.yml b/server/ansible/playbooks/k3s/install.yml new file mode 100644 index 000000000..0bbd295e5 --- /dev/null +++ b/server/ansible/playbooks/k3s/install.yml @@ -0,0 +1,14 @@ +--- +- hosts: + - server-nodes + - worker-nodes + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 5 seconds... + pause: + seconds: 5 + roles: + - xanmanning.k3s + - k3s diff --git a/server/ansible/playbooks/k3s/nuke.yml b/server/ansible/playbooks/k3s/nuke.yml new file mode 100644 index 000000000..77596ba0b --- /dev/null +++ b/server/ansible/playbooks/k3s/nuke.yml @@ -0,0 +1,32 @@ +--- +- hosts: + - server-nodes + - worker-nodes + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 5 seconds... + pause: + seconds: 5 + tasks: + - name: kill k3s + ansible.builtin.command: + cmd: /usr/local/bin/k3s-killall.sh + removes: /usr/local/bin/k3s-killall.sh + - name: uninstall k3s + ansible.builtin.command: + cmd: /usr/local/bin/k3s-uninstall.sh + removes: /usr/local/bin/k3s-uninstall.sh + - name: uninstall k3s agent + ansible.builtin.command: + cmd: /usr/local/bin/k3s-agent-uninstall.sh + removes: /usr/local/bin/k3s-agent-uninstall.sh + - name: gather list of CNI files to delete + ansible.builtin.find: + paths: /etc/cni/net.d + patterns: "*" + register: files_to_delete + - name: delete CNI files + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + loop: "{{ files_to_delete.files }}" diff --git a/server/ansible/playbooks/k3s/upgrade.yml b/server/ansible/playbooks/k3s/upgrade.yml new file mode 100644 index 000000000..25c64a566 --- /dev/null +++ b/server/ansible/playbooks/k3s/upgrade.yml @@ -0,0 +1,13 @@ +--- +- hosts: + - server-nodes + - worker-nodes + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 5 seconds... + pause: + seconds: 5 + roles: + - xanmanning.k3s diff --git a/server/ansible/playbooks/power-outage/shutdown.yml b/server/ansible/playbooks/power-outage/shutdown.yml new file mode 100644 index 000000000..f598c5656 --- /dev/null +++ b/server/ansible/playbooks/power-outage/shutdown.yml @@ -0,0 +1,36 @@ +--- +- hosts: + - server-nodes + - worker-nodes + become: true + gather_facts: true + tasks: + # + # Turn off server-nodes and worker-nodes devices in 2 minutes + # + + - name: turn off server-nodes + # ansible.builtin.command: /sbin/shutdown -h 2 + ansible.builtin.command: /sbin/shutdown --help + when: "'server-nodes' in group_names" + + - name: turn off worker-nodes + # ansible.builtin.command: /sbin/shutdown -h 2 + ansible.builtin.command: /sbin/shutdown --help + when: "'worker-nodes' in group_names" + + # + # Turn off NAS devices in 5 minutes + # + + # QNAP devices do not have /sbin/shutdown and + # instead use busybox /sbin/poweroff + - name: turn off storage node nas-rocinante + # ansible.builtin.command: /sbin/poweroff -d 300 + ansible.builtin.command: /sbin/poweroff --help + when: inventory_hostname == "nas-rocinante" + + - name: turn off storage node nas-serenity + # ansible.builtin.command: /sbin/shutdown -h 5 + ansible.builtin.command: /sbin/shutdown --help + when: inventory_hostname == "nas-serenity" diff --git a/server/ansible/playbooks/ubuntu/prepare.yml b/server/ansible/playbooks/ubuntu/prepare.yml new file mode 100644 index 000000000..b98be53e2 --- /dev/null +++ b/server/ansible/playbooks/ubuntu/prepare.yml @@ -0,0 +1,13 @@ +--- +- hosts: + - server-nodes + - worker-nodes + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 5 seconds...
+ pause: + seconds: 5 + roles: + - ubuntu diff --git a/server/ansible/playbooks/ubuntu/upgrade.yml b/server/ansible/playbooks/ubuntu/upgrade.yml new file mode 100644 index 000000000..06de291a0 --- /dev/null +++ b/server/ansible/playbooks/ubuntu/upgrade.yml @@ -0,0 +1,22 @@ +--- +- hosts: + - server-nodes + - worker-nodes + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 5 seconds... + pause: + seconds: 5 + tasks: + - name: upgrade + ansible.builtin.apt: + upgrade: full + update_cache: true + cache_valid_time: 3600 + autoclean: true + autoremove: true + register: apt_upgrade + retries: 5 + until: apt_upgrade is success diff --git a/server/ansible/requirements.txt b/server/ansible/requirements.txt new file mode 100644 index 000000000..d31198250 --- /dev/null +++ b/server/ansible/requirements.txt @@ -0,0 +1 @@ +jmespath==0.10.0 diff --git a/server/ansible/requirements.yml b/server/ansible/requirements.yml new file mode 100644 index 000000000..f42dbc4b4 --- /dev/null +++ b/server/ansible/requirements.yml @@ -0,0 +1,6 @@ +--- +roles: +- src: xanmanning.k3s + version: v2.8.0 +collections: +- name: community.general diff --git a/server/ansible/roles/k3s/tasks/calico.yml b/server/ansible/roles/k3s/tasks/calico.yml new file mode 100644 index 000000000..072153507 --- /dev/null +++ b/server/ansible/roles/k3s/tasks/calico.yml @@ -0,0 +1,38 @@ +--- +- name: cluster | calico | deploy tigera operator to k3s manifest directory + become: true + # run_once: true + ansible.builtin.get_url: + url: "{{ calico.operator_manifest }}" + dest: "{{ k3s_server_manifests_dir }}/tigera-operator.yaml" + mode: 0644 + +- name: cluster | calico | deploy configuration to k3s manifest directory + become: true + # run_once: true + ansible.builtin.template: + src: "calico-installation.yaml.j2" + dest: "{{ k3s_server_manifests_dir }}/calico-installation.yaml" + mode: 0644 + +- name: cluster | calico | deploy BGP-peer to k3s manifest directory + become: true + # run_once: true + ansible.builtin.template: + src: "calico-bgppeer.yaml.j2" + dest: "{{ k3s_server_manifests_dir }}/calico-bgppeer.yaml" + mode: 0644 + when: + - calico.bgp.enabled is defined + - calico.bgp.enabled + +- name: cluster | calico | deploy BGP-configuration to k3s manifest directory + become: true + # run_once: true + ansible.builtin.template: + src: "calico-bgpconfiguration.yaml.j2" + dest: "{{ k3s_server_manifests_dir }}/calico-bgpconfiguration.yaml" + mode: 0644 + when: + - calico.bgp.enabled is defined + - calico.bgp.enabled diff --git a/server/ansible/roles/k3s/tasks/kubeconfig.yml b/server/ansible/roles/k3s/tasks/kubeconfig.yml new file mode 100644 index 000000000..64af64551 --- /dev/null +++ b/server/ansible/roles/k3s/tasks/kubeconfig.yml @@ -0,0 +1,20 @@ +--- +- name: cluster | kubeconfig | copy config file to /tmp + become: true + run_once: true + ansible.builtin.fetch: + src: "/etc/rancher/k3s/k3s.yaml" + dest: "/tmp/kubeconfig" + flat: true + when: + - k3s_control_node is defined + - k3s_control_node + +- name: cluster | kubeconfig | update kubeconfig with the right IPv4 address + delegate_to: localhost + become: false + run_once: true + ansible.builtin.replace: + path: "/tmp/kubeconfig" + regexp: "https://127.0.0.1:6443" + replace: "https://{{ k3s_registration_address }}:6443" diff --git a/server/ansible/roles/k3s/tasks/main.yml b/server/ansible/roles/k3s/tasks/main.yml new file mode 100644 index 000000000..b5d5eea9d --- /dev/null +++ b/server/ansible/roles/k3s/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- 
include: kubeconfig.yml + tags: + - kubeconfig + +#- include: registry.yml +# when: mirror_registry is defined +# or (private_registries is defined +# and private_registries|length > 0) +# tags: +# - registry + +- include: calico.yml + when: + # - "'k8s-control-node-a' in inventory_hostname" + - k3s_control_node is defined + - k3s_control_node + - calico.enabled is defined + - calico.enabled + tags: + - calico diff --git a/server/ansible/roles/k3s/tasks/registry.yml b/server/ansible/roles/k3s/tasks/registry.yml new file mode 100644 index 000000000..1aaa726ef --- /dev/null +++ b/server/ansible/roles/k3s/tasks/registry.yml @@ -0,0 +1,21 @@ +--- +- name: cluster-registry | create /etc/rancher/k3s + become: true + ansible.builtin.file: + path: "/etc/rancher/k3s" + state: directory + mode: 0755 + +- name: cluster-registry | configure mirrors and custom registries + become: true + ansible.builtin.template: + src: "registries.yaml.j2" + dest: "/etc/rancher/k3s/registries.yaml" + mode: 0644 + +- name: cluster-registry | restart k3s systemd service + ansible.builtin.systemd: + name: k3s.service + daemon_reload: true + enabled: true + state: restarted diff --git a/server/ansible/roles/k3s/templates/calico-bgpconfiguration.yaml.j2 b/server/ansible/roles/k3s/templates/calico-bgpconfiguration.yaml.j2 new file mode 100644 index 000000000..89cd115a8 --- /dev/null +++ b/server/ansible/roles/k3s/templates/calico-bgpconfiguration.yaml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: crd.projectcalico.org/v1 +kind: BGPConfiguration +metadata: + name: default +spec: + serviceExternalIPs: + - cidr: {{ calico.bgp.externalIPs }} diff --git a/server/ansible/roles/k3s/templates/calico-bgppeer.yaml.j2 b/server/ansible/roles/k3s/templates/calico-bgppeer.yaml.j2 new file mode 100644 index 000000000..818ffb8ea --- /dev/null +++ b/server/ansible/roles/k3s/templates/calico-bgppeer.yaml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: crd.projectcalico.org/v1 +kind: BGPPeer +metadata: + name: global +spec: + peerIP: {{ calico.bgp.peer }} + asNumber: {{ calico.bgp.as }} diff --git a/server/ansible/roles/k3s/templates/calico-installation.yaml.j2 b/server/ansible/roles/k3s/templates/calico-installation.yaml.j2 new file mode 100644 index 000000000..2f1cea903 --- /dev/null +++ b/server/ansible/roles/k3s/templates/calico-installation.yaml.j2 @@ -0,0 +1,19 @@ +#jinja2:lstrip_blocks: True +--- +apiVersion: operator.tigera.io/v1 +kind: Installation +metadata: + name: default +spec: + calicoNetwork: + # Note: The ipPools section cannot be modified post-install.
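+  # Example rendering with the group_vars in this patch (cluster-cidr "10.69.0.0/16", calico.bgp.enabled true): +  #   ipPools: [{blockSize: 26, cidr: "10.69.0.0/16", encapsulation: None, natOutgoing: Enabled, nodeSelector: all()}]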
+ ipPools: + - blockSize: 26 + cidr: "{{ k3s_server["cluster-cidr"] }}" + {% if calico.bgp.enabled is defined and calico.bgp.enabled %} + encapsulation: None + {% else %} + encapsulation: VXLANCrossSubnet + {% endif %} + natOutgoing: Enabled + nodeSelector: all() diff --git a/server/ansible/roles/k3s/templates/registries.yaml.j2 b/server/ansible/roles/k3s/templates/registries.yaml.j2 new file mode 100644 index 000000000..10d0bf508 --- /dev/null +++ b/server/ansible/roles/k3s/templates/registries.yaml.j2 @@ -0,0 +1,20 @@ +#jinja2:lstrip_blocks: True +--- +{% if mirror_registry is defined %} +mirrors: + "docker.io": + endpoint: + - "{{ mirror_registry.address }}" + "*": + endpoint: + - "{{ mirror_registry.address }}" +{% endif %} +{% if private_registries is defined and private_registries|length > 0 %} +configs: + {% for private_registry in private_registries %} + "{{ private_registry.address }}": + auth: + username: "{{ private_registry.username }}" + password: "{{ private_registry.password }}" + {% endfor %} +{% endif %} diff --git a/server/ansible/roles/ubuntu/tasks/boot.yml b/server/ansible/roles/ubuntu/tasks/boot.yml new file mode 100644 index 000000000..0a5bdd769 --- /dev/null +++ b/server/ansible/roles/ubuntu/tasks/boot.yml @@ -0,0 +1,43 @@ +--- +- name: boot | grub | check for existence of grub + ansible.builtin.stat: + path: /etc/default/grub + register: grub_result + +- name: boot | grub | set apparmor=0 + ansible.builtin.replace: + path: /etc/default/grub + regexp: '^(GRUB_CMDLINE_LINUX_DEFAULT=(?:(?![" ]{{ option | regex_escape }}=).)*)(?:[" ]{{ option | regex_escape }}=\S+)?(.*")$' + replace: '\1 {{ option }}={{ value }}\2' + vars: + option: apparmor + value: 0 + when: + - grub_result.stat.exists + +- name: boot | grub | set mitigations=off + ansible.builtin.replace: + path: /etc/default/grub + regexp: '^(GRUB_CMDLINE_LINUX_DEFAULT=(?:(?![" ]{{ option | regex_escape }}=).)*)(?:[" ]{{ option | regex_escape }}=\S+)?(.*")$' + replace: '\1 {{ option }}={{ value }}\2' + vars: + option: mitigations + value: "off" + when: + - grub_result.stat.exists + +- name: boot | grub | set pti=off + ansible.builtin.replace: + path: /etc/default/grub + regexp: '^(GRUB_CMDLINE_LINUX_DEFAULT=(?:(?![" ]{{ option | regex_escape }}=).)*)(?:[" ]{{ option | regex_escape }}=\S+)?(.*")$' + replace: '\1 {{ option }}={{ value }}\2' + vars: + option: pti + value: "off" + when: + - grub_result.stat.exists + +- name: boot | grub | run grub-mkconfig + ansible.builtin.command: grub-mkconfig -o /boot/grub/grub.cfg + when: + - grub_result.stat.exists diff --git a/server/ansible/roles/ubuntu/tasks/disks.yml b/server/ansible/roles/ubuntu/tasks/disks.yml new file mode 100644 index 000000000..f1acdbbad --- /dev/null +++ b/server/ansible/roles/ubuntu/tasks/disks.yml @@ -0,0 +1,19 @@ +--- +- name: disks | create directories + ansible.builtin.file: + path: "{{ item }}" + state: directory + mode: "0755" + when: disks is defined + loop: + - /mnt/ssd1 + - /mnt/ssd1/qbittorrent + +- name: disks | mount disks + ansible.posix.mount: + path: "/mnt/ssd1" + src: "UUID=558ddf99-61e8-4ac1-9819-adff7c8cc560" + fstype: ext4 + opts: defaults + state: present + when: disks is defined diff --git a/server/ansible/roles/ubuntu/tasks/filesystem.yml b/server/ansible/roles/ubuntu/tasks/filesystem.yml new file mode 100644 index 000000000..17a633cf5 --- /dev/null +++ b/server/ansible/roles/ubuntu/tasks/filesystem.yml @@ -0,0 +1,20 @@ +--- +- name: filesystem | sysctl | update max_user_watches + ansible.posix.sysctl: + name:
fs.inotify.max_user_watches + value: "524288" + state: present + sysctl_file: /etc/sysctl.d/98-kubernetes-fs.conf + +- name: filesystem | swap | disable at runtime + ansible.builtin.command: swapoff -a + when: ansible_swaptotal_mb > 0 + +- name: filesystem | swap | disable on boot + ansible.posix.mount: + name: "{{ item }}" + fstype: swap + state: absent + loop: + - swap + - none diff --git a/server/ansible/roles/ubuntu/tasks/host.yml b/server/ansible/roles/ubuntu/tasks/host.yml new file mode 100644 index 000000000..3582ba57e --- /dev/null +++ b/server/ansible/roles/ubuntu/tasks/host.yml @@ -0,0 +1,6 @@ +--- +- name: host | hostname | update inventory hostname + ansible.builtin.hostname: + name: "{{ inventory_hostname }}" + when: + - ansible_hostname != inventory_hostname diff --git a/server/ansible/roles/ubuntu/tasks/kernel.yml b/server/ansible/roles/ubuntu/tasks/kernel.yml new file mode 100644 index 000000000..99a68c113 --- /dev/null +++ b/server/ansible/roles/ubuntu/tasks/kernel.yml @@ -0,0 +1,25 @@ +--- +- name: kernel | modules | enable at runtime + community.general.modprobe: + name: "{{ item }}" + state: present + loop: + - br_netfilter + - nf_conntrack + - overlay + - rbd + - ip_vs + - iscsi_tcp + +- name: kernel | modules | enable on boot + ansible.builtin.copy: + mode: 0644 + content: "{{ item }}" + dest: "/etc/modules-load.d/{{ item }}.conf" + loop: + - br_netfilter + - nf_conntrack + - overlay + - rbd + - ip_vs + - iscsi_tcp diff --git a/server/ansible/roles/ubuntu/tasks/locale.yml b/server/ansible/roles/ubuntu/tasks/locale.yml new file mode 100644 index 000000000..09e20ba62 --- /dev/null +++ b/server/ansible/roles/ubuntu/tasks/locale.yml @@ -0,0 +1,44 @@ +--- +- name: locale | set timezone + community.general.timezone: + name: "{{ timezone | default('Europe/Paris') }}" + +- name: locale | copy timesyncd config + ansible.builtin.copy: + mode: 0644 + content: | + [Time] + NTP={{ ntp_servers.primary | default("") | join(" ") }} + FallbackNTP={{ ntp_servers.fallback | join(" ") }} + dest: /etc/systemd/timesyncd.conf + when: + - ntp_servers.primary is defined + - ntp_servers.primary is iterable + - ntp_servers.primary | length > 0 + - ntp_servers.fallback is defined + - ntp_servers.fallback is iterable + - ntp_servers.fallback | length > 0 + +- name: locale | start systemd service + ansible.builtin.systemd: + name: systemd-timesyncd + enabled: true + state: started + +- name: locale | restart systemd service + ansible.builtin.systemd: + name: systemd-timesyncd + daemon_reload: true + enabled: true + state: restarted + +- name: locale | run timedatectl show + ansible.builtin.command: /usr/bin/timedatectl show + changed_when: false + check_mode: false + register: timedatectl_result + +- name: locale | enable ntp + ansible.builtin.command: /usr/bin/timedatectl set-ntp true + when: + - "'NTP=no' in timedatectl_result.stdout" diff --git a/server/ansible/roles/ubuntu/tasks/main.yml b/server/ansible/roles/ubuntu/tasks/main.yml new file mode 100644 index 000000000..21174212a --- /dev/null +++ b/server/ansible/roles/ubuntu/tasks/main.yml @@ -0,0 +1,51 @@ +--- +- include: host.yml + tags: + - host + +- include: locale.yml + tags: + - locale + +- include: packages.yml + tags: + - packages + +#- include: power-button.yml +# tags: +# - power-button + +- include: kernel.yml + tags: + - kernel + +- include: boot.yml + tags: + - boot + +- include: network.yml + tags: + - network + +- include: filesystem.yml + tags: + - filesystem + +- include: unattended-upgrades.yml + tags: + - unattended-upgrades + +- include: user.yml + tags: + - user + +- include: rsyslog.yml + when: + - rsyslog.enabled is defined + - rsyslog.enabled + tags: + - rsyslog + +- include: disks.yml + tags: + - disks diff --git a/server/ansible/roles/ubuntu/tasks/network.yml b/server/ansible/roles/ubuntu/tasks/network.yml new file mode 100644 index 000000000..287049661 --- /dev/null +++ b/server/ansible/roles/ubuntu/tasks/network.yml @@ -0,0 +1,23 @@ +--- +- name: network | check for bridge-nf-call-iptables + ansible.builtin.stat: + path: /proc/sys/net/bridge/bridge-nf-call-iptables + register: bridge_nf_call_iptables_result + +- name: network | sysctl | set config + ansible.builtin.blockinfile: + path: /etc/sysctl.d/99-kubernetes-cri.conf + mode: 0644 + create: true + block: | + net.ipv4.ip_forward = 1 + net.bridge.bridge-nf-call-iptables = 1 + when: + - bridge_nf_call_iptables_result.stat.exists + register: sysctl_network + +- name: network | sysctl | reload + ansible.builtin.shell: sysctl -p /etc/sysctl.d/99-kubernetes-cri.conf + when: + - sysctl_network.changed + - bridge_nf_call_iptables_result.stat.exists diff --git a/server/ansible/roles/ubuntu/tasks/packages.yml b/server/ansible/roles/ubuntu/tasks/packages.yml new file mode 100644 index 000000000..af98dded4 --- /dev/null +++ b/server/ansible/roles/ubuntu/tasks/packages.yml @@ -0,0 +1,94 @@ +--- +- name: packages | disable recommends + ansible.builtin.blockinfile: + path: /etc/apt/apt.conf.d/02norecommends + mode: 0644 + create: true + block: | + APT::Install-Recommends "false"; + APT::Install-Suggests "false"; + APT::Get::Install-Recommends "false"; + APT::Get::Install-Suggests "false"; + +- name: packages | upgrade all packages + ansible.builtin.apt: + upgrade: full + update_cache: true + cache_valid_time: 3600 + autoclean: true + autoremove: true + register: apt_upgrade + retries: 5 + until: apt_upgrade is success + when: + - (skip_upgrade_packages is not defined or (skip_upgrade_packages is defined and not skip_upgrade_packages)) + +- name: packages | install common + ansible.builtin.apt: + name: "{{ packages.apt_install }}" + install_recommends: false + update_cache: true + cache_valid_time: 3600 + autoclean: true + autoremove: true + register: apt_install_common + retries: 5 + until: apt_install_common is success + when: + - packages.apt_install is defined + - packages.apt_install is iterable + - packages.apt_install | length > 0 + +- name: packages | remove crufty packages + block: + - name: packages | remove crufty packages | gather install packages + ansible.builtin.package_facts: + manager: auto + when: + - "'snapd' in packages.apt_remove" + - name: packages | remove crufty packages | check if snap is installed + ansible.builtin.debug: + msg: "snapd is installed" + register: snapd_check + when: + - "'snapd' in packages.apt_remove" + - "'snapd' in ansible_facts.packages" + - name: packages | remove crufty packages | remove snap packages + ansible.builtin.command: snap remove {{ item }} + loop: + - lxd + - core18 + - snapd + when: + - "'snapd' in packages.apt_remove" + - "'snapd' in ansible_facts.packages" + - snapd_check.failed is defined + - name: packages | remove crufty packages | remove packages + ansible.builtin.apt: + name: "{{ packages.apt_remove }}" + state: absent + autoremove: true + - name: packages | remove crufty packages | remove crufty files + ansible.builtin.file: + state: absent + path: "{{ item }}" + loop: + - "/home/{{ ansible_user }}/.snap" + - "/snap" + - "/var/snap" + - "/var/lib/snapd" + - 
"/var/cache/snapd" + - "/usr/lib/snapd" + - "/etc/cloud" + - "/var/lib/cloud" + when: + - "'snapd' in packages.apt_remove" + - "'cloud-init' in packages.apt_remove" + when: + - packages.apt_remove is defined + - packages.apt_remove is iterable + - packages.apt_remove | length > 0 + - (skip_remove_packages is not defined or (skip_remove_packages is defined and not skip_remove_packages)) diff --git a/server/ansible/roles/ubuntu/tasks/power-button.yml b/server/ansible/roles/ubuntu/tasks/power-button.yml new file mode 100644 index 000000000..18d93524a --- /dev/null +++ b/server/ansible/roles/ubuntu/tasks/power-button.yml @@ -0,0 +1,15 @@ +--- +- name: power-button | disable single power button press shutdown + ansible.builtin.lineinfile: + path: /etc/systemd/logind.conf + regexp: "{{ item.setting }}" + line: "{{ item.setting }}={{ item.value }}" + loop: + - { setting: HandlePowerKey, value: ignore } + +- name: power-button | restart logind systemd service + ansible.builtin.systemd: + name: systemd-logind.service + daemon_reload: true + enabled: true + state: restarted diff --git a/server/ansible/roles/ubuntu/tasks/rsyslog.yml b/server/ansible/roles/ubuntu/tasks/rsyslog.yml new file mode 100644 index 000000000..aa5ad4bbe --- /dev/null +++ b/server/ansible/roles/ubuntu/tasks/rsyslog.yml @@ -0,0 +1,19 @@ +--- +- name: rsyslog + block: + - name: rsyslog | copy promtail configuration + ansible.builtin.template: + src: "rsyslog-50-promtail.conf.j2" + dest: "/etc/rsyslog.d/50-promtail.conf" + mode: 0644 + - name: rsyslog | start systemd service + ansible.builtin.systemd: + name: rsyslog + enabled: true + state: started + - name: rsyslog | restart systemd service + ansible.builtin.systemd: + name: rsyslog.service + daemon_reload: true + enabled: true + state: restarted diff --git a/server/ansible/roles/ubuntu/tasks/unattended-upgrades.yml b/server/ansible/roles/ubuntu/tasks/unattended-upgrades.yml new file mode 100644 index 000000000..1f9ab677f --- /dev/null +++ b/server/ansible/roles/ubuntu/tasks/unattended-upgrades.yml @@ -0,0 +1,37 @@ +--- +- name: unattended-upgrades | copy 20auto-upgrades config + ansible.builtin.blockinfile: + path: /etc/apt/apt.conf.d/20auto-upgrades + mode: 0644 + create: true + block: | + APT::Periodic::Update-Package-Lists "14"; + APT::Periodic::Download-Upgradeable-Packages "14"; + APT::Periodic::AutocleanInterval "7"; + APT::Periodic::Unattended-Upgrade "1"; + +- name: unattended-upgrades | copy 50unattended-upgrades config + ansible.builtin.blockinfile: + path: /etc/apt/apt.conf.d/50unattended-upgrades + mode: 0644 + create: true + block: | + Unattended-Upgrade::Automatic-Reboot "false"; + Unattended-Upgrade::Remove-Unused-Dependencies "true"; + Unattended-Upgrade::Allowed-Origins { + "${distro_id}:${distro_codename}"; + "${distro_id} ${distro_codename}-security"; + }; + +- name: unattended-upgrades | start systemd service + ansible.builtin.systemd: + name: unattended-upgrades + enabled: true + state: started + +- name: unattended-upgrades | restart systemd service + ansible.builtin.service: + name: unattended-upgrades.service + daemon_reload: true + enabled: true + state: restarted diff --git a/server/ansible/roles/ubuntu/tasks/user.yml b/server/ansible/roles/ubuntu/tasks/user.yml new file mode 100644 index 000000000..4fc3309fd --- /dev/null +++ b/server/ansible/roles/ubuntu/tasks/user.yml @@ -0,0 +1,35 @@ +--- +- name: user | get home directory + ansible.builtin.shell: "echo $HOME" + changed_when: false + check_mode: no + register: user_home + +- name: user | add to 
sudoers + ansible.builtin.copy: + content: "{{ ansible_user }} ALL=(ALL:ALL) NOPASSWD:ALL" + dest: "/etc/sudoers.d/{{ ansible_user }}_nopasswd" + mode: "0440" + +- name: user | add additional SSH public keys + ansible.posix.authorized_key: + user: "{{ ansible_user }}" + key: "{{ item }}" + loop: "{{ ssh_authorized_keys }}" + when: + - ssh_authorized_keys is defined + - ssh_authorized_keys is iterable + - ssh_authorized_keys | length > 0 + +- name: user | check if hushlogin exists + ansible.builtin.stat: + path: "/{{ user_home.stdout }}/.hushlogin" + register: hushlogin_result + +- name: user | silence the login prompt + ansible.builtin.file: + dest: "/{{ user_home.stdout }}/.hushlogin" + state: touch + owner: "{{ ansible_user }}" + mode: "0775" + when: not hushlogin_result.stat.exists diff --git a/server/ansible/roles/ubuntu/templates/rsyslog-50-promtail.conf.j2 b/server/ansible/roles/ubuntu/templates/rsyslog-50-promtail.conf.j2 new file mode 100644 index 000000000..fa61c4e12 --- /dev/null +++ b/server/ansible/roles/ubuntu/templates/rsyslog-50-promtail.conf.j2 @@ -0,0 +1,4 @@ +module(load="omprog") +module(load="mmutf8fix") +action(type="mmutf8fix" replacementChar="?") +action(type="omfwd" protocol="tcp" target="{{ rsyslog.ip }}" port="{{ rsyslog.port }}" Template="RSYSLOG_SyslogProtocol23Format" TCP_Framing="octet-counted" KeepAlive="on") diff --git a/server/ansible/roles/ubuntu/vars/main.yml b/server/ansible/roles/ubuntu/vars/main.yml new file mode 100644 index 000000000..7426ec76d --- /dev/null +++ b/server/ansible/roles/ubuntu/vars/main.yml @@ -0,0 +1,69 @@ +--- +packages: + apt_install: + - apt-transport-https + - arptables + - ca-certificates + - curl + # - dnsutils + - ebtables + # - ethtool + # - git + # - gnupg-agent + # - gnupg2 + # - haveged + - hdparm + - htop + # - iperf3 + - iputils-ping + - ipvsadm + # - jq + - lvm2 + # - neofetch + - net-tools + # - netcat + - nfs-common + - nano + # - nmap + - ntpdate + - open-iscsi + # - pigz + - psmisc + # - python3 + # - python3-openssl + # - python3-pip + # - rclone + # - rsync + # - scsitools + - smartmontools + - socat + - software-properties-common + # - traceroute + # - tree + - unattended-upgrades + - unzip + # - vim + apt_remove: + - apparmor + - apport + - bcache-tools + - btrfs-progs + - byobu + - cloud-init + - cloud-guest-utils + - cloud-initramfs-copymods + - cloud-initramfs-dyn-netconf + - friendly-recovery + - fwupd + - landscape-common + - lxd-agent-loader + - ntfs-3g + - open-vm-tools + - plymouth + - plymouth-theme-ubuntu-text + - popularity-contest + - snapd + - sosreport + - tmux + - ubuntu-advantage-tools + - ufw diff --git a/server/ansible/roles/xanmanning.k3s/.ansible-lint b/server/ansible/roles/xanmanning.k3s/.ansible-lint new file mode 100644 index 000000000..32133767e --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/.ansible-lint @@ -0,0 +1,4 @@ +--- + +skip_list: + - role-name diff --git a/server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/bug_report.md b/server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..5a1862ced --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,55 @@ +--- +name: Bug report +about: Create a report to help us improve +--- + + + + +### Summary + + + +### Issue Type + +- Bug Report + +### Controller Environment and Configuration + + + + + + + +```text + +``` + +### Steps to Reproduce + + + + + +```yaml + +``` + +### Expected Result + + + +```text + +``` + +### Actual 
Result + + + + + +```text + +``` diff --git a/server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/config.yml b/server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..8b76b046a --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,3 @@ +--- + +blank_issues_enabled: true diff --git a/server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/feature_request.md b/server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..d3f15a67c --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,33 @@ +--- +name: Feature request +about: Suggest an idea for this project +--- + + + + +### Summary + + + +### Issue Type + +- Feature Request + +### User Story + + + + +_As a_ \ +_I want to_ \ +_So that_ + +### Additional Information + + + + +```yaml + +``` diff --git a/server/ansible/roles/xanmanning.k3s/.github/PULL_REQUEST_TEMPLATE.md b/server/ansible/roles/xanmanning.k3s/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..620f20efc --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,37 @@ +## TITLE + +### Summary + + + + + +### Issue type + + +- Bugfix +- Documentation +- Feature + +### Test instructions + + + +### Acceptance Criteria + + + + + +### Additional Information + + + + + +```text + +``` diff --git a/server/ansible/roles/xanmanning.k3s/.github/stale.yml b/server/ansible/roles/xanmanning.k3s/.github/stale.yml new file mode 100644 index 000000000..82c0d0030 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/.github/stale.yml @@ -0,0 +1,18 @@ +--- +# Number of days of inactivity before an issue becomes stale +daysUntilStale: 60 +# Number of days of inactivity before a stale issue is closed +daysUntilClose: 7 +# Issues with these labels will never be considered stale +exemptLabels: + - pinned + - security +# Label to use when marking an issue as stale +staleLabel: wontfix +# Comment to post when marking an issue as stale. Set to `false` to disable +markComment: > + This issue has been automatically marked as stale because it has not had + recent activity. It will be closed if no further activity occurs. Thank you + for your contributions. +# Comment to post when closing a stale issue. 
Set to `false` to disable +closeComment: false diff --git a/server/ansible/roles/xanmanning.k3s/.github/workflows/ci.yml b/server/ansible/roles/xanmanning.k3s/.github/workflows/ci.yml new file mode 100644 index 000000000..9d3170557 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/.github/workflows/ci.yml @@ -0,0 +1,65 @@ +--- + +name: CI +'on': + pull_request: + push: + branches: + - master + - main + - v1_release + schedule: + - cron: "0 1 1 * *" + +defaults: + run: + working-directory: "xanmanning.k3s" + +jobs: + molecule: + name: Molecule + runs-on: ubuntu-latest + strategy: + matrix: + include: + - distro: debian10 + scenario: default + - distro: ubuntu2004 + scenario: default + - distro: amazonlinux2 + scenario: default + - distro: centos7 + scenario: default + - distro: ubuntu1804 + scenario: default + - distro: fedora31 + scenario: nodeploy + - distro: fedora29 + scenario: highavailabilitydb + - distro: fedora30 + scenario: autodeploy + - distro: debian9 + scenario: highavailabilityetcd + - distro: centos8 + scenario: highavailabilityetcd + + steps: + - name: Checkout codebase + uses: actions/checkout@v2 + with: + path: "xanmanning.k3s" + + - name: Set up Python 3 + uses: actions/setup-python@v2 + with: + python-version: "3.x" + + - name: Install test dependencies + run: pip3 install -r molecule/requirements.txt + + - name: Run Molecule tests + run: molecule test --scenario-name "${{ matrix.scenario }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + MOLECULE_DISTRO: ${{ matrix.distro }} diff --git a/server/ansible/roles/xanmanning.k3s/.github/workflows/release.yml b/server/ansible/roles/xanmanning.k3s/.github/workflows/release.yml new file mode 100644 index 000000000..22a7d22b7 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/.github/workflows/release.yml @@ -0,0 +1,32 @@ +--- + +name: Release +'on': + push: + tags: + - '*' + +defaults: + run: + working-directory: "xanmanning.k3s" + +jobs: + release: + name: Release + runs-on: ubuntu-latest + steps: + - name: Checkout codebase + uses: actions/checkout@v2 + with: + path: "xanmanning.k3s" + + - name: Set up Python 3 + uses: actions/setup-python@v2 + with: + python-version: "3.x" + + - name: Install Ansible + run: pip3 install -r requirements.txt + + - name: Trigger a new import on Galaxy + run: ansible-galaxy role import --api-key ${{ secrets.GALAXY_API_KEY }} $(echo ${{ github.repository }} | cut -d/ -f1) $(echo ${{ github.repository }} | cut -d/ -f2) diff --git a/server/ansible/roles/xanmanning.k3s/.gitignore b/server/ansible/roles/xanmanning.k3s/.gitignore new file mode 100644 index 000000000..903a36e92 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/.gitignore @@ -0,0 +1,12 @@ +.vagrant +*.retry +VAULT_PASSWORD +VAULT_PASS +.vault_pass +.vault_pass.asc +vagramt/fetch +vagrant/ubuntu-*.log +__pycache__ +ansible.cfg +pyratlabs-issue-dump.txt +.cache diff --git a/server/ansible/roles/xanmanning.k3s/.yamllint b/server/ansible/roles/xanmanning.k3s/.yamllint new file mode 100644 index 000000000..882767605 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/.yamllint @@ -0,0 +1,33 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: disable + 
key-duplicates: enable
+  line-length: disable
+  new-line-at-end-of-file: disable
+  new-lines:
+    type: unix
+  trailing-spaces: disable
+  truthy: disable
diff --git a/server/ansible/roles/xanmanning.k3s/CHANGELOG.md b/server/ansible/roles/xanmanning.k3s/CHANGELOG.md
new file mode 100644
index 000000000..ad7710036
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/CHANGELOG.md
@@ -0,0 +1,304 @@
+# Change Log
+
+
+## 2021-03-14, v2.8.0
+
+Happy π day!
+
+### Notable changes
+
+ - Updated GitHub Actions, resolved linting errors.
+ - Renamed `k3s_control_node_address` -> `k3s_registration_address`
+
+### Breaking changes
+
+ - A task has been added to rename `k3s_control_node_address` to
+   `k3s_registration_address` for any users still using this variable name,
+   however this might still break something.
+
+---
+
+## 2021-02-28, v2.7.1
+
+### Notable changes
+
+ - Bugfix, missing become on cluster token check.
+
+---
+
+## 2021-02-27, v2.7.0
+
+### Notable changes
+
+ - Cluster init checks added.
+ - Tidy up of tasks, failed checks.
+ - Possible fix for #93 - force draining of nodes added.
+
+---
+
+## 2021-02-27, v2.6.1
+
+### Notable changes
+
+ - Bugfix: Templating error for single control plane nodes using Etcd.
+ - Bugfix: a number of typos fixed.
+
+---
+
+## 2021-02-16, v2.6.0
+
+### Notable changes
+
+ - Tidy up of `when` params and `assert` tasks to be more readable.
+ - Added feature to tweak K3S service dependencies.
+ - Updated documentation:
+   - Node labels and component arguments
+   - systemd config
+   - Use alternate CNI (Calico example)
+
+---
+
+## 2021-01-31, v2.5.3
+
+### Notable changes
+
+ - Bugfix, missing update to minimum ansible version var #91.
+
+---
+
+## 2021-01-30, v2.5.2
+
+### Notable changes
+
+ - Bugfix, missing `k3s_start_on_boot` to control `systemd.enabled` added.
+
+---
+
+## 2021-01-30, v2.5.1
+
+### Notable changes
+
+ - Added uninstall task to remove hard-linked files #88
+ - Fixed missing become for `systemd` operations tasks. #89
+ - Added `k3s_start_on_boot` to control `systemd.enabled`.
+
+---
+
+## 2021-01-24, v2.5.0
+
+### Notable changes
+
+ - Added support for Ansible >= 2.9.17 #83
+
+---
+
+## 2021-01-23, v2.4.3
+
+### Notable changes
+
+ - Bugfix: Installation hangs on "Check that all nodes to be ready" #84
+
+---
+
+## 2021-01-10, v2.4.2
+
+### Notable changes
+
+ - Bugfix: Docker check still failing on "false"
+
+---
+
+## 2021-01-02, v2.4.1
+
+### Notable changes
+
+ - Fixed issue with armv6l (Raspberry Pi Zero W)
+ - Added path for private repositories config to directory creation list.
+
+---
+
+## 2020-12-21, v2.4.0
+
+### Notable changes
+
+ - `k3s_config_dir` derived from `k3s_config_file`, reused throughout the role
+   to allow for easy removal of "Rancher" references #73.
+ - `k3s_token_location` has moved to be in `k3s_config_dir`.
+ - Tasks for creating directories now looped to capture configuration from
+   `k3s_server` and `k3s_agent` and ensure directories exist before k3s
+   starts, see #75.
+ - Server token collected directly from token file, not symlinked file
+   (node-token).
+ - `k3s_runtime_config` defined in `vars/` for validation and overwritten in
+   tasks for control plane and workers.
+ - Removed unused references to GitHub API.
+ - `set_fact` and `command` tasks now use FQCN.
+ - Check of `ansible_version` in environment check.
+ - Introduction of target environment checks for #72.
+ - Fixed bug with non-default listening port not being passed to workers.
+ - Added ability to put documentation links into validation checks #76.
+ - Removed the requirement for `jmespath` on the Ansible controller.
+ - Fixed bug with issue data collection tasks.
+
+### Breaking changes
+
+ - Ansible minimum version is hard set to v2.10.4
+ - `k3s_token_location` has moved to be in `k3s_config_dir` so re-running the
+   role will create a duplicate file here.
+
+---
+
+## 2020-12-19, v2.3.0
+
+### Notable changes
+
+ - Updated k3s uninstall scripts #74
+ - Started moving Rancher references to `vars/` as per #73
+
+---
+
+## 2020-12-19, v2.2.2
+
+### Notable changes
+
+ - Fixed typos in documentation.
+ - Molecule testing pinned to v3.1 due to tests failing.
+
+---
+
+## 2020-12-16, v2.2.1
+
+### Notable changes
+
+ - Re-working documentation
+ - Updated GitHub link, org changed from Rancher to k3s-io.
+ - Replace deprecated `play_hosts` variable.
+
+### Breaking changes
+
+ - Moving git branch from `master` to `main`.
+
+---
+
+## 2020-12-12, v2.2.0
+
+### Notable changes
+
+ - Use of FQCNs enforced, minimum Ansible version now v2.10
+ - `k3s_etcd_datastore` no longer experimental after K3s version v1.19.5+k3s1
+ - Docker marked as deprecated for K3s > v1.20.0+k3s1
+
+### Breaking changes
+
+ - Use of FQCNs enforced, minimum Ansible version now v2.10
+ - Use of Docker requires `k3s_use_unsupported_config` to be `true` after
+   v1.20.0+k3s1
+
+---
+
+## 2020-12-05, v2.1.1
+
+### Notable changes
+
+ - Fixed link to documentation.
+
+---
+
+## 2020-12-05, v2.1.0
+
+### Notable changes
+
+ - Deprecated configuration check built into validation steps.
+ - Removed duplicated tasks for single node cluster.
+ - Added documentation providing quickstart examples and common operations.
+ - Fixed data-dir configuration.
+ - Some tweaks to rootless.
+ - Fix draining and removing of nodes.
+
+### Breaking changes
+
+ - `k3s_token_location` now points to a file location, not a directory.
+ - `k3s_systemd_unit_directory` renamed to `k3s_systemd_unit_dir`
+ - Removed `k3s_node_data_dir` as this is now configured with `data-dir` in
+   `k3s_server` and/or `k3s_agent`.
+
+### Known issues
+
+ - Rootless is still broken, this is still not supported as a method for
+   running k3s using this role.
+
+---
+
+## 2020-11-30, v2.0.2
+
+### Notable changes
+
+ - Updated issue template and information collection tasks.
+
+---
+
+## 2020-11-30, v2.0.1
+
+### Notable changes
+
+ - Fixed a number of typos in the README.md
+ - Updated the meta/main.yml to put quotes around minimum Ansible version.
+
+---
+
+## 2020-11-29, v2.0.0
+
+### Notable changes
+
+ - #64 - Initial release of v2.0.0 of
+   [ansible-role-k3s](https://github.com/PyratLabs/ansible-role-k3s).
+ - Minimum supported k3s version now: v1.19.1+k3s1
+ - Minimum supported Ansible version now: v2.10.0
+ - #62 - Remove all references to the word "master".
+ - #53 - Move to file-based configuration.
+ - Refactored to avoid duplication in code and make contribution easier.
+ - Validation checks moved to using variables defined in `vars/`
+
+### Breaking changes
+
+#### File based configuration
+
+Issue #53
+
+With the release of v1.19.1+k3s1, this role has moved to file-based
+configuration of k3s. This requires manual translation of v1 configuration
+variables into configuration file format.
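+
+To illustrate the translation (a sketch only; the variable and path below are
+examples, not a complete migration guide), a v1.x setting such as
+`k3s_node_data_dir` (removed in v2.1.0, see above) is now expressed through
+the config-file dicts:
+
+```yaml
+# v1.x style variable (removed in v2.1.0):
+# k3s_node_data_dir: /var/lib/rancher/k3s
+
+# v2.x equivalent, rendered into the k3s configuration file:
+k3s_server:
+  data-dir: /var/lib/rancher/k3s
+```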
+
+Please see: https://rancher.com/docs/k3s/latest/en/installation/install-options/#configuration-file
+
+#### Minimum supported k3s version
+
+As this role now relies on file-based configuration, the v2.x release of this
+role will only support v1.19+ of k3s. If you are not in a position to update
+k3s you will need to continue using the v1.x release of this role, which will
+be supported until March 2021.
+
+#### Minimum supported ansible version
+
+This role now only supports Ansible v2.10+; this is because it has moved on to
+using FQCNs, with the exception of `set_fact` tasks which have
+[been broken](https://github.com/ansible/ansible/issues/72319) and the fixes
+have [not yet been backported to v2.10](https://github.com/ansible/ansible/pull/71824).
+
+The use of FQCNs allows for custom modules to be introduced to override task
+behavior. If this role requires a custom ansible module to be introduced then
+this can be added as a dependency and targeted specifically by using the
+correct FQCN.
diff --git a/server/ansible/roles/xanmanning.k3s/CONTRIBUTING.md b/server/ansible/roles/xanmanning.k3s/CONTRIBUTING.md
new file mode 100644
index 000000000..da7f6acce
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/CONTRIBUTING.md
@@ -0,0 +1,46 @@
+# Contribution Guidelines
+
+Thank you for taking time to contribute to this Ansible role.
+
+There are a number of ways that you can contribute to this project, not all of
+them requiring you to be able to write code. Below is a list of suggested
+contributions welcomed by the community:
+
+ - Submit bug reports in GitHub issues
+ - Comment on bug reports with further information or suggestions
+ - Suggest new features
+ - Create Pull Requests fixing bugs or adding new features
+ - Update and improve documentation
+ - Review the role on Ansible Galaxy
+ - Write a blog post reviewing the role
+ - Sponsor me.
+
+## Issue guidelines
+
+Issues are the best way to capture a bug in the role, or suggest new features.
+This is because issues are visible to the entire community and allow other
+contributors to pick up the work, so they are a better communication medium
+than email.
+
+A good bug issue will include as much information as possible about the
+environment Ansible is running in, as well as the role configuration. If there
+are any relevant pieces of documentation from upstream projects, these should
+be included.
+
+New feature requests are also best captured in issues; these should include
+as much relevant information as possible and if possible include a "user story"
+(don't sweat if you don't know how to write one). If there are any relevant
+pieces of documentation from upstream projects, these should be included.
+
+## Pull request guidelines
+
+PRs should only contain 1 issue fix at a time to limit the scope of testing
+required. The smaller the scope of the PR, the easier it is for it to be
+reviewed.
+
+PRs should include the keyword `Fixes` before an issue number if the PR will
+completely close the issue. This is because automation will close the issue
+once the PR is merged.
+
+PRs are preferred to be merged in as a single commit, so rebasing before
+pushing is recommended, however this isn't a strict rule.
diff --git a/server/ansible/roles/xanmanning.k3s/LICENSE.txt b/server/ansible/roles/xanmanning.k3s/LICENSE.txt
new file mode 100644
index 000000000..af447bb96
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/LICENSE.txt
@@ -0,0 +1,30 @@
+BSD 3-Clause License
+
+Copyright (c) 2020, Xan Manning
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/server/ansible/roles/xanmanning.k3s/README.md b/server/ansible/roles/xanmanning.k3s/README.md
new file mode 100644
index 000000000..dc839367d
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/README.md
@@ -0,0 +1,344 @@
+# Ansible Role: k3s (v2.x)
+
+Ansible role for installing [K3S](https://k3s.io/) ("Lightweight
+Kubernetes") as either a standalone server or cluster.
+
+[![CI](https://github.com/PyratLabs/ansible-role-k3s/workflows/CI/badge.svg?event=push)](https://github.com/PyratLabs/ansible-role-k3s/actions?query=workflow%3ACI)
+
+## Release notes
+
+Please see [Releases](https://github.com/PyratLabs/ansible-role-k3s/releases)
+and [CHANGELOG.md](CHANGELOG.md).
+
+## Requirements
+
+The host you're running Ansible from requires the following Python dependencies:
+
+ - `ansible >= 2.9.17` or `ansible-base >= 2.10.4`
+
+You can install dependencies using the requirements.txt file in this repository:
+`pip3 install -r requirements.txt`.
+
+This role has been tested against the following Linux Distributions:
+
+ - Amazon Linux 2
+ - Archlinux
+ - CentOS 8
+ - CentOS 7
+ - Debian 9
+ - Debian 10
+ - Fedora 29
+ - Fedora 30
+ - Fedora 31
+ - Fedora 32
+ - openSUSE Leap 15
+ - Ubuntu 18.04 LTS
+ - Ubuntu 20.04 LTS
+
+:warning: The v2 releases of this role only support `k3s >= v1.19`; for
+`k3s < v1.19` please consider updating or use the v1.x releases of this role.
+
+Before upgrading, see [CHANGELOG](CHANGELOG.md) for notifications of breaking
+changes.
+
+## Role Variables
+
+Since K3s [v1.19.1+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.19.1%2Bk3s1)
+you can now configure K3s using a
+[configuration file](https://rancher.com/docs/k3s/latest/en/installation/install-options/#configuration-file)
+rather than environment variables or command line arguments. The v2 release of
+this role has moved to the configuration file method rather than populating a
+systemd unit file with command-line arguments. There may be exceptions that are
+defined in [Global/Cluster Variables](#globalcluster-variables), however you will
+mostly be configuring k3s by configuration files using the `k3s_server` and
+`k3s_agent` variables.
+
+See "_Server (Control Plane) Configuration_" and "_Agent (Worker) Configuration_"
+below.
+
+### Global/Cluster Variables
+
+Below are variables that are set against all of the play hosts for environment
+consistency. These are generally cluster-level configuration.
+
+| Variable | Description | Default Value |
+|----------|-------------|---------------|
+| `k3s_state` | State of k3s: installed, started, stopped, downloaded, uninstalled, validated. | installed |
+| `k3s_release_version` | Use a specific version of k3s, eg. `v0.2.0`. Specify `false` for stable. | `false` |
+| `k3s_config_file` | Location of the k3s configuration file. | `/etc/rancher/k3s/config.yaml` |
+| `k3s_build_cluster` | When multiple play hosts are available, attempt to cluster. Read notes below. | `true` |
+| `k3s_registration_address` | Fixed registration address for nodes. IP or FQDN. | NULL |
+| `k3s_github_url` | Set the GitHub URL to install k3s from. | https://github.com/k3s-io/k3s |
+| `k3s_install_dir` | Installation directory for k3s. | `/usr/local/bin` |
+| `k3s_install_hard_links` | Install using hard links rather than symbolic links. | `false` |
+| `k3s_server_manifests_templates` | A list of Auto-Deploying Manifests Templates. | [] |
+| `k3s_use_experimental` | Allow the use of experimental features in k3s. | `false` |
+| `k3s_use_unsupported_config` | Allow the use of unsupported configurations in k3s. | `false` |
+| `k3s_etcd_datastore` | Enable etcd embedded datastore (read notes below). | `false` |
+| `k3s_debug` | Enable debug logging on the k3s service. | `false` |
+
+### K3S Service Configuration
+
+The below variables change how and when the systemd service unit file for K3S
+is run. Use this with caution, please refer to the [systemd documentation](https://www.freedesktop.org/software/systemd/man/systemd.unit.html#%5BUnit%5D%20Section%20Options)
+for more information.
+
+| Variable | Description | Default Value |
+|----------|-------------|---------------|
+| `k3s_start_on_boot` | Start k3s on boot. | `true` |
+| `k3s_service_requires` | List of required systemd units to k3s service unit. | [] |
+| `k3s_service_wants` | List of "wanted" systemd units to k3s (weaker than "requires"). | []\* |
+| `k3s_service_before` | Start k3s before a defined list of systemd units. | [] |
+| `k3s_service_after` | Start k3s after a defined list of systemd units. | []\* |
+
+\* The systemd unit template **always** specifies `network-online.target` for
+`wants` and `after`.
+
+### Group/Host Variables
+
+Below are variables that are set against individual or groups of play hosts.
+Typically you'd set these at group level for the control plane or worker nodes.
+
+| Variable | Description | Default Value |
+|----------|-------------|---------------|
+| `k3s_control_node` | Specify if a host (or host group) is part of the control plane. | `false` (role will automatically delegate a node) |
+| `k3s_server` | Server (control plane) configuration, see notes below. | `{}` |
+| `k3s_agent` | Agent (worker) configuration, see notes below. | `{}` |
+
+#### Server (Control Plane) Configuration
+
+The control plane is configured with the `k3s_server` dict variable. Please
+refer to the below documentation for configuration options:
+
+https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/
+
+The `k3s_server` dictionary variable will contain flags from the above
+(removing the `--` prefix). Below is an example:
+
+```yaml
+k3s_server:
+  datastore-endpoint: postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable
+  docker: true
+  cluster-cidr: 172.20.0.0/16
+  flannel-backend: 'none'  # This needs to be in quotes
+  disable:
+    - traefik
+    - coredns
+```
+
+Alternatively, you can create a .yaml file and read it in to the `k3s_server`
+variable as per the below example:
+
+```yaml
+k3s_server: "{{ lookup('file', 'path/to/k3s_server.yml') | from_yaml }}"
+```
+
+Check out the [Documentation](documentation/README.md) for example
+configuration.
+
+#### Agent (Worker) Configuration
+
+Workers are configured with the `k3s_agent` dict variable. Please refer to the
+below documentation for configuration options:
+
+https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config
+
+The `k3s_agent` dictionary variable will contain flags from the above
+(removing the `--` prefix). Below is an example:
+
+```yaml
+k3s_agent:
+  with-node-id: true
+  node-label:
+    - "foo=bar"
+    - "hello=world"
+```
+
+Alternatively, you can create a .yaml file and read it in to the `k3s_agent`
+variable as per the below example:
+
+```yaml
+k3s_agent: "{{ lookup('file', 'path/to/k3s_agent.yml') | from_yaml }}"
+```
+
+Check out the [Documentation](documentation/README.md) for example
+configuration.
+
+### Ansible Controller Configuration Variables
+
+The below variables are used to change the way the role executes in Ansible,
+particularly with regards to privilege escalation.
+
+| Variable | Description | Default Value |
+|----------|-------------|---------------|
+| `k3s_skip_validation` | Skip all tasks that validate configuration. | `false` |
+| `k3s_skip_env_checks` | Skip all tasks that check environment configuration. | `false` |
+| `k3s_become_for_all` | Escalate user privileges for all tasks. Overrides all of the below. | `false` |
+| `k3s_become_for_systemd` | Escalate user privileges for systemd tasks. | NULL |
+| `k3s_become_for_install_dir` | Escalate user privileges for creating installation directories. | NULL |
+| `k3s_become_for_directory_creation` | Escalate user privileges for creating application directories. | NULL |
+| `k3s_become_for_usr_local_bin` | Escalate user privileges for writing to `/usr/local/bin`. | NULL |
+| `k3s_become_for_package_install` | Escalate user privileges for installing k3s. | NULL |
+| `k3s_become_for_kubectl` | Escalate user privileges for running `kubectl`. | NULL |
+| `k3s_become_for_uninstall` | Escalate user privileges for uninstalling k3s. | NULL |
+
+#### Important note about `k3s_release_version`
+
+If you do not set a `k3s_release_version`, the latest version from the stable
+channel of k3s will be installed. If you are developing against a specific
+version of k3s you must ensure this is set in your Ansible configuration, eg:
+
+```yaml
+k3s_release_version: v1.19.3+k3s1
+```
+
+It is also possible to install specific K3s "Channels"; below are some
+examples for `k3s_release_version`:
+
+```yaml
+k3s_release_version: false         # defaults to 'stable' channel
+k3s_release_version: stable        # latest 'stable' release
+k3s_release_version: testing       # latest 'testing' release
+k3s_release_version: v1.19         # latest 'v1.19' release
+k3s_release_version: v1.19.3+k3s3  # specific release
+
+# Specific commit
+# CAUTION - only used for testing - must be 40 characters
+k3s_release_version: 48ed47c4a3e420fa71c18b2ec97f13dc0659778b
+```
+
+#### Important note about `k3s_install_hard_links`
+
+If you are using the [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller)
+you will need to use hard links rather than symbolic links as the controller
+will not be able to follow symbolic links. This option has been added, however
+it is not enabled by default to avoid breaking existing installations.
+
+To enable the use of hard links, ensure `k3s_install_hard_links` is set
+to `true`.
+
+```yaml
+k3s_install_hard_links: true
+```
+
+The result of this can be seen by running the following in `k3s_install_dir`:
+
+`ls -larthi | grep -E 'k3s|ctr|ctl' | grep -vE ".sh$" | sort`
+
+Symbolic Links:
+
+```text
+[root@node1 bin]# ls -larthi | grep -E 'k3s|ctr|ctl' | grep -vE ".sh$" | sort
+3277823 -rwxr-xr-x 1 root root 52M Jul 25 12:50 k3s-v1.18.4+k3s1
+3279565 lrwxrwxrwx 1 root root 31 Jul 25 12:52 k3s -> /usr/local/bin/k3s-v1.18.6+k3s1
+3279644 -rwxr-xr-x 1 root root 51M Jul 25 12:52 k3s-v1.18.6+k3s1
+3280079 lrwxrwxrwx 1 root root 31 Jul 25 12:52 ctr -> /usr/local/bin/k3s-v1.18.6+k3s1
+3280080 lrwxrwxrwx 1 root root 31 Jul 25 12:52 crictl -> /usr/local/bin/k3s-v1.18.6+k3s1
+3280081 lrwxrwxrwx 1 root root 31 Jul 25 12:52 kubectl -> /usr/local/bin/k3s-v1.18.6+k3s1
+```
+
+Hard Links:
+
+```text
+[root@node1 bin]# ls -larthi | grep -E 'k3s|ctr|ctl' | grep -vE ".sh$" | sort
+3277823 -rwxr-xr-x 1 root root 52M Jul 25 12:50 k3s-v1.18.4+k3s1
+3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 crictl
+3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 ctr
+3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 k3s
+3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 k3s-v1.18.6+k3s1
+3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 kubectl
+```
+
+#### Important note about `k3s_build_cluster`
+
+If you set `k3s_build_cluster` to `false`, this role will install each play
+host as a standalone node. An example of when you might use this would be
+when building a large number of standalone IoT devices running K3s. Below is a
+hypothetical situation where we are to deploy 25 Raspberry Pi devices, each a
+standalone system and not a cluster of 25 nodes. To do this we'd use a playbook
+similar to the below:
+
+```yaml
+- hosts: k3s_nodes  # eg. 25 RPi's defined in our inventory.
+  vars:
+    k3s_build_cluster: false
+  roles:
+    - xanmanning.k3s
+```
+
+#### Important note about `k3s_control_node` and High Availability (HA)
+
+By default only one host will be defined as a control node by Ansible. If you
+do not set a host as a control node, this role will automatically delegate
+the first play host as a control node. This is not suitable for use within
+a Production workload.
+
+If multiple hosts have `k3s_control_node` set to `true`, you must also set
+`datastore-endpoint` in `k3s_server` as the connection string to a MySQL or
+PostgreSQL database, or an external Etcd cluster, otherwise the play will fail.
+
+If using TLS, the CA, Certificate and Key need to already be available on
+the play hosts.
+
+See: [High Availability with an External DB](https://rancher.com/docs/k3s/latest/en/installation/ha/)
+
+It is also possible, though not supported, to run a single K3s control node
+with a `datastore-endpoint` defined. As this is not a typically supported
+configuration you will need to set `k3s_use_unsupported_config` to `true`.
+
+Since K3s v1.19.1 it is possible to use an embedded Etcd as the backend
+database, and this is done by setting `k3s_etcd_datastore` to `true`.
+The best practice for Etcd is to define at least 3 members to ensure quorum is
+established. In addition to this, an odd number of members is recommended to
+ensure a majority in the event of a network partition. If you want to use 2
+members or an even number of members, please set `k3s_use_unsupported_config`
+to `true`.
+
+## Dependencies
+
+No dependencies on other roles.
+
+## Example Playbooks
+
+Example playbook, single control node running `testing` channel k3s:
+
+```yaml
+- hosts: k3s_nodes
+  roles:
+    - { role: xanmanning.k3s, k3s_release_version: testing }
+```
+
+Example playbook, Highly Available with PostgreSQL database running the latest
+stable release:
+
+```yaml
+- hosts: k3s_nodes
+  vars:
+    k3s_registration_address: loadbalancer  # Typically a load balancer.
+    k3s_server:
+      datastore-endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable"
+  pre_tasks:
+    - name: Set each node to be a control node
+      ansible.builtin.set_fact:
+        k3s_control_node: true
+      when: inventory_hostname in ['node2', 'node3']
+  roles:
+    - role: xanmanning.k3s
+```
+
+## License
+
+[BSD 3-clause](LICENSE.txt)
+
+## Contributors
+
+Contributions from the community are very welcome, but please read the
+[contribution guidelines](CONTRIBUTING.md) before doing so, this will help
+make things as streamlined as possible.
+
+Also, please check out the awesome
+[list of contributors](https://github.com/PyratLabs/ansible-role-k3s/graphs/contributors).
+
+## Author Information
+
+[Xan Manning](https://xan.manning.io/)
diff --git a/server/ansible/roles/xanmanning.k3s/defaults/main.yml b/server/ansible/roles/xanmanning.k3s/defaults/main.yml
new file mode 100644
index 000000000..b599ee819
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/defaults/main.yml
@@ -0,0 +1,99 @@
+---
+
+##
+# Global/Cluster Configuration
+##
+
+# k3s state, options: installed, started, stopped, restarted, uninstalled, validated
+# (default: installed)
+k3s_state: installed
+
+# Use a specific k3s version, if set to "false" we will get the latest
+# k3s_release_version: v1.19.3
+k3s_release_version: false
+
+# Location of the k3s configuration file
+k3s_config_file: /etc/rancher/k3s/config.yaml
+
+# When multiple ansible_play_hosts_all are present, attempt to cluster the nodes.
+# Using false will create multiple standalone nodes.
+# (default: true)
+k3s_build_cluster: true
+
+# URL for GitHub project
+k3s_github_url: https://github.com/k3s-io/k3s
+
+# Skip all tasks that validate configuration
+k3s_skip_validation: false
+
+# Skip all tasks that check environment configuration
+k3s_skip_env_checks: false
+
+# Installation directory for k3s
+k3s_install_dir: /usr/local/bin
+
+# Install using hard links rather than symbolic links
+k3s_install_hard_links: false
+
+# A list of templates used to preconfigure the cluster.
+k3s_server_manifests_templates: []
+
+# Use experimental features in k3s?
+k3s_use_experimental: false
+
+# Allow for unsupported configurations in k3s?
+k3s_use_unsupported_config: false
+
+# Enable etcd embedded datastore
+k3s_etcd_datastore: false
+
+##
+# Systemd config
+##
+
+# Start k3s on system boot
+k3s_start_on_boot: true
+
+# List of required systemd units to k3s service unit.
+k3s_service_requires: []
+
+# List of "wanted" systemd units to k3s (weaker than "requires").
+k3s_service_wants: []
+
+# Start k3s before a defined list of systemd units.
+k3s_service_before: []
+
+# Start k3s after a defined list of systemd units.
+k3s_service_after: []
+
+##
+# Server Configuration
+##
+
+k3s_server: {}
+# k3s_server:
+#   listen-port: 6443
+
+##
+# Agent Configuration
+##
+
+k3s_agent: {}
+# k3s_agent:
+#   node-label:
+#     - "foo=bar"
+#     - "bish=bosh"
+
+##
+# Ansible Controller configuration
+##
+
+# Use become privileges for:
+k3s_become_for_all: false
+k3s_become_for_systemd: null
+k3s_become_for_install_dir: null
+k3s_become_for_directory_creation: null
+k3s_become_for_usr_local_bin: null
+k3s_become_for_package_install: null
+k3s_become_for_kubectl: null
+k3s_become_for_uninstall: null
diff --git a/server/ansible/roles/xanmanning.k3s/documentation/README.md b/server/ansible/roles/xanmanning.k3s/documentation/README.md
new file mode 100644
index 000000000..24cdbb996
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/documentation/README.md
@@ -0,0 +1,43 @@
+# ansible-role-k3s
+
+This document describes a number of ways of consuming this Ansible role for use
+in your own k3s deployments. It will not be able to cover every use case
+scenario but will provide some common example configurations.
+
+## Requirements
+
+Before you start you will need an Ansible controller. This can either be your
+workstation, or a dedicated system that you have access to. The instructions
+in this documentation assume you are using the `ansible` CLI; there are no
+instructions available for Ansible Tower at this time.
+
+Follow the below guide to get Ansible installed.
+
+https://docs.ansible.com/ansible/latest/installation_guide/index.html
+
+## Quickstart
+
+Below are quickstart examples for a single node k3s server, a k3s cluster
+with a single control node and an HA k3s cluster. These represent the bare
+minimum configuration.
+ + - [Single node k3s](quickstart-single-node.md) + - [Simple k3s cluster](quickstart-cluster.md) + - [HA k3s cluster using embedded etcd](quickstart-ha-cluster.md) + +## Example configurations and operations + +### Configuration + + - [Setting up 2-node HA control plane with external datastore](configuration/2-node-ha-ext-datastore.md) + - [Provision multiple standalone k3s nodes](configuration/multiple-standalone-k3s-nodes.md) + - [Set node labels and component arguments](configuration/node-labels-and-component-args.md) + - [Use an alternate CNI](configuration/use-an-alternate-cni.md) + - [Start K3S after another service](configuration/systemd-config.md) + +### Operations + + - [Stop/Start a cluster](operations/stop-start-cluster.md) + - [Updating k3s](operations/updating-k3s.md) + - [Extending a cluster](operations/extending-a-cluster.md) + - [Shrinking a cluster](operations/shrinking-a-cluster.md) diff --git a/server/ansible/roles/xanmanning.k3s/documentation/configuration/2-node-ha-ext-datastore.md b/server/ansible/roles/xanmanning.k3s/documentation/configuration/2-node-ha-ext-datastore.md new file mode 100644 index 000000000..f7b81307b --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/documentation/configuration/2-node-ha-ext-datastore.md @@ -0,0 +1,79 @@ +# 2 Node HA Control Plane with external database + +For this configuration we are deploying a highly available control plane +composed of two control nodes. This can be achieved with embedded etcd, however +etcd ideally has an odd number of nodes. + +The example below will use an external PostgreSQL datastore to store the +cluster state information. + +Main guide: https://rancher.com/docs/k3s/latest/en/installation/ha/ + +## Architecture + +```text + +---------------+ + | Load Balancer | + +-------+-------+ + | + | + | + | + +------------+ | +------------+ + | | | | | ++--------+ control-01 +<-----+----->+ control-02 | +| | | | | +| +-----+------+ +------+-----+ +| | | +| +-------------+-------------+ +| | | | +| +------v----+ +-----v-----+ +----v------+ +| | | | | | | +| | worker-01 | | worker-02 | | worker-03 | +| | | | | | | +| +-----------+ +-----------+ +-----------+ +| +| +-------+ +-------+ +| | | | | ++-------------------> db-01 +--+ db-02 | + | | | | + +-------+ +-------+ +``` + +### Required Components + + - Load balancer + - 2 control plane nodes + - 1 or more worker nodes + - PostgreSQL Database (replicated, or Linux HA Cluster). + +## Configuration + +For your control nodes, you will need to instruct the control plane of the +PostgreSQL datastore endpoint and set `k3s_control_node_address` to be the +hostname or IP of your load balancer. + +Below is the example for PostgreSQL, it is possible to use MySQL or an Etcd +cluster as well. Consult the below guide for using alternative datastore +endpoints. + +https://rancher.com/docs/k3s/latest/en/installation/datastore/#datastore-endpoint-format-and-functionality + +```yaml +--- + +k3s_server: + datastore-endpoint: postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable + node-taint: + - "k3s-controlplane=true:NoExecute" +``` + +Your worker nodes need to know how to connect to the control plane, this is +defined by setting `k3s_control_node_address` to the hostname or IP address of +the load balancer. 
+
+```yaml
+---
+
+k3s_control_node_address: control.examplek3s.com
+```
diff --git a/server/ansible/roles/xanmanning.k3s/documentation/configuration/multiple-standalone-k3s-nodes.md b/server/ansible/roles/xanmanning.k3s/documentation/configuration/multiple-standalone-k3s-nodes.md
new file mode 100644
index 000000000..3e9a08a65
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/documentation/configuration/multiple-standalone-k3s-nodes.md
@@ -0,0 +1,71 @@
+# Multiple standalone K3s nodes
+
+This is an example of when you might want to configure multiple standalone
+k3s nodes simultaneously. For this we will assume a hypothetical situation
+where we are configuring 200 Raspberry Pis to deploy to our shop floors.
+
+Each Raspberry Pi will be configured as a standalone IoT device hosting an
+application that will push data to head office.
+
+## Architecture
+
+```text
++-------------+
+|             |
+|  Node-01    +-+
+|             | |
++--+----------+ +-+
+   |  |          | |
+   +--+----------+ +-+
+      |  |          | |
+      +--+----------+ |
+         |  Node-N    |
+         +------------+
+```
+
+## Configuration
+
+Below is our example inventory of 200 nodes (Truncated):
+
+```yaml
+---
+
+k3s_workers:
+  hosts:
+    kube-0:
+      ansible_user: ansible
+      ansible_host: 10.10.9.2
+      ansible_python_interpreter: /usr/bin/python3
+    kube-1:
+      ansible_user: ansible
+      ansible_host: 10.10.9.3
+      ansible_python_interpreter: /usr/bin/python3
+    kube-2:
+      ansible_user: ansible
+      ansible_host: 10.10.9.4
+      ansible_python_interpreter: /usr/bin/python3
+
+    # ..... SNIP .....
+
+    kube-199:
+      ansible_user: ansible
+      ansible_host: 10.10.9.201
+      ansible_python_interpreter: /usr/bin/python3
+    kube-200:
+      ansible_user: ansible
+      ansible_host: 10.10.9.202
+      ansible_python_interpreter: /usr/bin/python3
+
+```
+
+In our `group_vars/` (or as `vars:` in our playbook), we will need to set the
+`k3s_build_cluster` variable to `false`. This will stop the role from
+attempting to cluster all 200 nodes; instead it will install k3s across each
+node as 200 standalone servers.
+
+```yaml
+---
+
+k3s_build_cluster: false
+```
diff --git a/server/ansible/roles/xanmanning.k3s/documentation/configuration/node-labels-and-component-args.md b/server/ansible/roles/xanmanning.k3s/documentation/configuration/node-labels-and-component-args.md
new file mode 100644
index 000000000..3daef9b0c
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/documentation/configuration/node-labels-and-component-args.md
@@ -0,0 +1,39 @@
+# Configure node labels and component arguments
+
+The following command line arguments can be specified multiple times with
+`key=value` pairs:
+
+ - `--kube-kubelet-arg`
+ - `--kube-proxy-arg`
+ - `--kube-apiserver-arg`
+ - `--kube-scheduler-arg`
+ - `--kube-controller-manager-arg`
+ - `--kube-cloud-controller-manager-arg`
+ - `--node-label`
+ - `--node-taint`
+
+In the config file, this is done by defining a list of values for each
+command line argument, for example:
+
+```yaml
+---
+
+k3s_server:
+  # Set the plugins registry directory
+  kubelet-arg:
+    - "volume-plugin-dir=/var/lib/rancher/k3s/agent/kubelet/plugins_registry"
+  # Set the pod eviction timeout and node monitor grace period
+  kube-controller-manager-arg:
+    - "pod-eviction-timeout=2m"
+    - "node-monitor-grace-period=30s"
+  # Set API server feature gate
+  kube-apiserver-arg:
+    - "feature-gates=RemoveSelfLink=false"
+  # Labels to apply to a node
+  node-label:
+    - "NodeTier=development"
+    - "NodeLocation=eu-west-2a"
+  # Stop k3s control plane having workloads scheduled on them
+  node-taint:
+    - "k3s-controlplane=true:NoExecute"
+```
diff --git a/server/ansible/roles/xanmanning.k3s/documentation/configuration/systemd-config.md b/server/ansible/roles/xanmanning.k3s/documentation/configuration/systemd-config.md
new file mode 100644
index 000000000..3611f503e
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/documentation/configuration/systemd-config.md
@@ -0,0 +1,19 @@
+# systemd config
+
+Below are examples to tweak how and when K3S starts up.
+
+## Wanted service units
+
+In this example, we're going to start K3S after Wireguard. Our example server
+has a Wireguard connection `wg0`. We are using "wants" rather than "requires"
+as it's a weaker requirement that Wireguard must be running. We then want
+K3S to start after Wireguard has started.
+
+```yaml
+---
+
+k3s_service_wants:
+  - wg-quick@wg0.service
+k3s_service_after:
+  - wg-quick@wg0.service
+```
diff --git a/server/ansible/roles/xanmanning.k3s/documentation/configuration/use-an-alternate-cni.md b/server/ansible/roles/xanmanning.k3s/documentation/configuration/use-an-alternate-cni.md
new file mode 100644
index 000000000..d67f3cafc
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/documentation/configuration/use-an-alternate-cni.md
@@ -0,0 +1,63 @@
+# Use an alternate CNI
+
+K3S ships with Flannel, however sometimes you want a different CNI such as
+Calico, Canal or Weave Net. To do this you will need to disable Flannel with
+`flannel-backend: "none"`, specify a `cluster-cidr` and add your CNI manifests
+to the `k3s_server_manifests_templates`.
+
+## Calico example
+
+The below is based on the
+[Calico quickstart documentation](https://docs.projectcalico.org/getting-started/kubernetes/quickstart).
+
+Steps:
+
+ 1. Download `tigera-operator.yaml` to the manifests directory.
+ 1. Download `custom-resources.yaml` to the manifests directory.
+ 1. Choose a `cluster-cidr` (we are using 192.168.0.0/16)
+ 1. Set `k3s_server` and `k3s_server_manifests_templates` as per the below,
+    ensure the paths to manifests are correct for your project repo.
+
+```yaml
+---
+
+# K3S Server config, don't deploy flannel and set cluster pod CIDR.
+k3s_server:
+  cluster-cidr: 192.168.0.0/16
+  flannel-backend: "none"
+
+# Deploy the following k3s server templates.
+k3s_server_manifests_templates:
+  - "manifests/calico/tigera-operator.yaml"
+  - "manifests/calico/custom-resources.yaml"
+```
+
+All nodes should come up as "Ready"; below is a 3-node cluster:
+
+```text
+$ kubectl get nodes -o wide -w
+NAME     STATUS   ROLES                       AGE    VERSION        INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION     CONTAINER-RUNTIME
+kube-0   Ready    control-plane,etcd,master   114s   v1.20.2+k3s1   10.10.9.2     10.10.9.2     Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.3-k3s1
+kube-1   Ready    control-plane,etcd,master   80s    v1.20.2+k3s1   10.10.9.3     10.10.9.3     Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.3-k3s1
+kube-2   Ready    control-plane,etcd,master   73s    v1.20.2+k3s1   10.10.9.4     10.10.9.4     Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.3-k3s1
+```
+
+Pods should be deployed within the CIDR specified in our config file.
+
+```text
+$ kubectl get pods -o wide -A
+NAMESPACE         NAME                                      READY   STATUS      RESTARTS   AGE     IP               NODE     NOMINATED NODE   READINESS GATES
+calico-system     calico-kube-controllers-cfb4ff54b-8rp8r   1/1     Running     0          5m4s    192.168.145.65   kube-0
+calico-system     calico-node-2cm2m                         1/1     Running     0          5m4s    10.10.9.2        kube-0
+calico-system     calico-node-2s6lx                         1/1     Running     0          4m42s   10.10.9.4        kube-2
+calico-system     calico-node-zwqjz                         1/1     Running     0          4m49s   10.10.9.3        kube-1
+calico-system     calico-typha-7b6747d665-78swq             1/1     Running     0          3m5s    10.10.9.4        kube-2
+calico-system     calico-typha-7b6747d665-8ff66             1/1     Running     0          3m5s    10.10.9.3        kube-1
+calico-system     calico-typha-7b6747d665-hgplx             1/1     Running     0          5m5s    10.10.9.2        kube-0
+kube-system       coredns-854c77959c-6qhgt                  1/1     Running     0          5m20s   192.168.145.66   kube-0
+kube-system       helm-install-traefik-4czr9                0/1     Completed   0          5m20s   192.168.145.67   kube-0
+kube-system       metrics-server-86cbb8457f-qcxf5           1/1     Running     0          5m20s   192.168.145.68   kube-0
+kube-system       traefik-6f9cbd9bd4-7h4rl                  1/1     Running     0          2m50s   192.168.126.65   kube-1
+tigera-operator   tigera-operator-b6c4bfdd9-29hhr           1/1     Running     0          5m20s   10.10.9.2        kube-0
+```
diff --git a/server/ansible/roles/xanmanning.k3s/documentation/operations/extending-a-cluster.md b/server/ansible/roles/xanmanning.k3s/documentation/operations/extending-a-cluster.md
new file mode 100644
index 000000000..e10ffa7de
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/documentation/operations/extending-a-cluster.md
@@ -0,0 +1,69 @@
+# Extending a cluster
+
+This document describes the method for extending a cluster with new worker
+nodes.
+
+## Assumptions
+
+It is assumed that you have already deployed a k3s cluster using this role,
+and that you have an appropriately configured inventory and playbook to create
+the cluster.
+
+Below, our example inventory and playbook are as follows:
+
+ - inventory: `inventory.yml`
+ - playbook: `cluster.yml`
+
+Currently your `inventory.yml` looks like this; it has two nodes defined,
+`kube-0` (control node) and `kube-1` (worker node).
+
+```yaml
+---
+
+k3s_cluster:
+  hosts:
+    kube-0:
+      ansible_user: ansible
+      ansible_host: 10.10.9.2
+      ansible_python_interpreter: /usr/bin/python3
+    kube-1:
+      ansible_user: ansible
+      ansible_host: 10.10.9.3
+      ansible_python_interpreter: /usr/bin/python3
+```
+
+## Method
+
+We have our two nodes, one control, one worker. The goal is to extend this to
+add capacity by adding a new worker node, `kube-2`. To do this we will add the
+new node to our inventory.
+
+```yaml
+---
+
+k3s_cluster:
+  hosts:
+    kube-0:
+      ansible_user: ansible
+      ansible_host: 10.10.9.2
+      ansible_python_interpreter: /usr/bin/python3
+    kube-1:
+      ansible_user: ansible
+      ansible_host: 10.10.9.3
+      ansible_python_interpreter: /usr/bin/python3
+    kube-2:
+      ansible_user: ansible
+      ansible_host: 10.10.9.4
+      ansible_python_interpreter: /usr/bin/python3
+```
+
+Once the new node has been added, you can re-run the automation to join it to
+the cluster. You should expect the majority of changes to be on the worker node
+being introduced to the cluster.
+
+```text
+PLAY RECAP *******************************************************************************************************
+kube-0   : ok=53   changed=1    unreachable=0    failed=0    skipped=30   rescued=0    ignored=0
+kube-1   : ok=40   changed=1    unreachable=0    failed=0    skipped=35   rescued=0    ignored=0
+kube-2   : ok=42   changed=10   unreachable=0    failed=0    skipped=35   rescued=0    ignored=0
+```
diff --git a/server/ansible/roles/xanmanning.k3s/documentation/operations/shrinking-a-cluster.md b/server/ansible/roles/xanmanning.k3s/documentation/operations/shrinking-a-cluster.md
new file mode 100644
index 000000000..bf900b15b
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/documentation/operations/shrinking-a-cluster.md
@@ -0,0 +1,74 @@
+# Shrinking a cluster
+
+This document describes the method for shrinking a cluster by removing a
+worker node.
+
+## Assumptions
+
+It is assumed that you have already deployed a k3s cluster using this role,
+and that you have an appropriately configured inventory and playbook to create
+the cluster.
+
+Below, our example inventory and playbook are as follows:
+
+ - inventory: `inventory.yml`
+ - playbook: `cluster.yml`
+
+Currently your `inventory.yml` looks like this; it has three nodes defined,
+`kube-0` (control node) and `kube-1`, `kube-2` (worker nodes).
+
+```yaml
+---
+
+k3s_cluster:
+  hosts:
+    kube-0:
+      ansible_user: ansible
+      ansible_host: 10.10.9.2
+      ansible_python_interpreter: /usr/bin/python3
+    kube-1:
+      ansible_user: ansible
+      ansible_host: 10.10.9.3
+      ansible_python_interpreter: /usr/bin/python3
+    kube-2:
+      ansible_user: ansible
+      ansible_host: 10.10.9.4
+      ansible_python_interpreter: /usr/bin/python3
+```
+
+## Method
+
+We have our three nodes, one control, two workers. The goal is to shrink this
+to remove excess capacity by offboarding the worker node `kube-2`. To do this
+we will set the `kube-2` node to `k3s_state: uninstalled` in our inventory.
+
+```yaml
+---
+
+k3s_cluster:
+  hosts:
+    kube-0:
+      ansible_user: ansible
+      ansible_host: 10.10.9.2
+      ansible_python_interpreter: /usr/bin/python3
+    kube-1:
+      ansible_user: ansible
+      ansible_host: 10.10.9.3
+      ansible_python_interpreter: /usr/bin/python3
+    kube-2:
+      ansible_user: ansible
+      ansible_host: 10.10.9.4
+      ansible_python_interpreter: /usr/bin/python3
+      k3s_state: uninstalled
+```
+
+What you will typically see is changes to your control plane (`kube-0`) and the
+node being removed (`kube-2`). The role will register the removal of the node
+with the cluster by draining the node and removing it from the cluster.
+
+```text
+PLAY RECAP *******************************************************************************************************
+kube-0   : ok=55   changed=2    unreachable=0    failed=0    skipped=28   rescued=0    ignored=0
+kube-1   : ok=40   changed=0    unreachable=0    failed=0    skipped=35   rescued=0    ignored=0
+kube-2   : ok=23   changed=2    unreachable=0    failed=0    skipped=17   rescued=0    ignored=1
+```
diff --git a/server/ansible/roles/xanmanning.k3s/documentation/operations/stop-start-cluster.md b/server/ansible/roles/xanmanning.k3s/documentation/operations/stop-start-cluster.md
new file mode 100644
index 000000000..c321e00b2
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/documentation/operations/stop-start-cluster.md
@@ -0,0 +1,93 @@
+# Stopping and Starting a cluster
+
+This document describes the Ansible method for restarting a k3s cluster
+deployed by this role.
+
+## Assumptions
+
+It is assumed that you have already deployed a k3s cluster using this role,
+and that you have an appropriately configured inventory and playbook to create
+the cluster.
+
+Below, our example inventory and playbook are as follows:
+
+ - inventory: `inventory.yml`
+ - playbook: `cluster.yml`
+
+## Method
+
+### Start cluster
+
+You can start the cluster using either of the following commands:
+
+ - Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=started'`
+ - Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=started' --become all`
+
+Below is example output; remember that Ansible is idempotent so re-running a
+command may not necessarily change the state.
+
+**Playbook method output**:
+
+```text
+PLAY RECAP *******************************************************************************************************
+kube-0   : ok=6    changed=0    unreachable=0    failed=0    skipped=2    rescued=0    ignored=0
+kube-1   : ok=6    changed=0    unreachable=0    failed=0    skipped=2    rescued=0    ignored=0
+kube-2   : ok=6    changed=0    unreachable=0    failed=0    skipped=2    rescued=0    ignored=0
+```
+
+### Stop cluster
+
+You can stop the cluster using either of the following commands:
+
+ - Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=stopped'`
+ - Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=stopped' --become all`
+
+Below is example output; remember that Ansible is idempotent so re-running a
+command may not necessarily change the state.
+
+**Playbook method output**:
+
+```text
+PLAY RECAP *******************************************************************************************************
+kube-0   : ok=6    changed=1    unreachable=0    failed=0    skipped=2    rescued=0    ignored=0
+kube-1   : ok=6    changed=1    unreachable=0    failed=0    skipped=2    rescued=0    ignored=0
+kube-2   : ok=6    changed=1    unreachable=0    failed=0    skipped=2    rescued=0    ignored=0
+```
+
+### Restart cluster
+
+Just like the `service` module, you can also specify `restarted` as a state.
+This will do `stop` followed by `start`.
+
+ - Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=restarted'`
+ - Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=restarted' --become all`
+
+```text
+PLAY RECAP *******************************************************************************************************
+kube-0 : ok=7 changed=1 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
+kube-1 : ok=7 changed=1 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
+kube-2 : ok=7 changed=1 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
+```
+
+## Tips
+
+You can limit the targets by adding the `-l` flag to your `ansible-playbook`
+command, or by simply targeting specific hosts in your ad-hoc commands. For
+example, in a 3-node cluster (called `kube-0`, `kube-1` and `kube-2`) we can
+limit the restart to `kube-1` and `kube-2` with the following:
+
+ - Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=restarted' -l "kube-1,kube-2"`
+ - Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=restarted' --become "kube-1,kube-2"`
+
+```text
+PLAY RECAP ********************************************************************************************************
+kube-1 : ok=7 changed=2 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
+kube-2 : ok=7 changed=2 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
+```
+
+## FAQ
+
+ 1. _Why might I use the `ansible-playbook` command over an ad-hoc command?_
+    - The stop/start tasks will be aware of configuration. As the role
+      develops, there might be some pre-tasks added to change how a cluster
+      is stopped or started.
diff --git a/server/ansible/roles/xanmanning.k3s/documentation/operations/updating-k3s.md b/server/ansible/roles/xanmanning.k3s/documentation/operations/updating-k3s.md
new file mode 100644
index 000000000..b6713e4c7
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/documentation/operations/updating-k3s.md
@@ -0,0 +1,52 @@
+# Updating k3s
+
+## Before you start!
+
+Ensure you back up your k3s cluster. This is particularly important if you use
+an external datastore or embedded etcd. Please refer to the guide below on
+backing up your k3s datastore:
+
+https://rancher.com/docs/k3s/latest/en/backup-restore/
+
+Also check that your volume backups are working!
+
+## Procedure
+
+### Updates using Ansible
+
+To update via Ansible, set `k3s_release_version` to the target version you
+wish to upgrade to. For example, starting from your `v1.19.3+k3s1` playbook:
+
+```yaml
+---
+# BEFORE
+
+- name: Provision k3s cluster
+  hosts: k3s_cluster
+  roles:
+    - name: xanmanning.k3s
+      vars:
+        k3s_release_version: v1.19.3+k3s1
+```
+
+Updating to `v1.20.2+k3s1`:
+
+```yaml
+---
+# AFTER
+
+- name: Provision k3s cluster
+  hosts: k3s_cluster
+  roles:
+    - name: xanmanning.k3s
+      vars:
+        k3s_release_version: v1.20.2+k3s1
+```
+
+### Automatic updates
+
+For automatic updates, consider installing Rancher's
+[system-upgrade-controller](https://rancher.com/docs/k3s/latest/en/upgrades/automated/).
+
+**Please note**: to be able to update using the system-upgrade-controller,
+you will need to set `k3s_install_hard_links` to `true`.
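+
+As a sketch (using the same playbook shape as above; the version number is
+illustrative), a playbook prepared for the system-upgrade-controller might
+look like this:
+
+```yaml
+---
+
+- name: Provision k3s cluster
+  hosts: k3s_cluster
+  roles:
+    - name: xanmanning.k3s
+      vars:
+        k3s_release_version: v1.20.2+k3s1
+        # Hard links (rather than symlinks) allow the upgrade controller
+        # to replace the installed binary.
+        k3s_install_hard_links: true
+```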
diff --git a/server/ansible/roles/xanmanning.k3s/documentation/quickstart-cluster.md b/server/ansible/roles/xanmanning.k3s/documentation/quickstart-cluster.md
new file mode 100644
index 000000000..a07b8bf5f
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/documentation/quickstart-cluster.md
@@ -0,0 +1,147 @@
+# Quickstart: K3s cluster with a single control node
+
+This is the quickstart guide to creating your own k3s cluster with one control
+plane node. This control plane node will also be a worker.
+
+:hand: This example requires your Ansible user to be able to connect to the
+servers over SSH using key-based authentication. The user also needs an entry
+in a sudoers file that allows privilege escalation without requiring a
+password.
+
+To test this is the case, run the following check, replacing `<ansible_user>`
+and `<server_name>`. The expected output is `Works`.
+
+`ssh <ansible_user>@<server_name> 'sudo cat /etc/shadow >/dev/null && echo "Works"'`
+
+For example:
+
+```text
+[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"'
+Works
+[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $
+```
+
+## Directory structure
+
+Our working directory will have the following files:
+
+```text
+kubernetes-playground/
+    |_ inventory.yml
+    |_ cluster.yml
+```
+
+## Inventory
+
+Here's a YAML-based example inventory for our servers called `inventory.yml`:
+
+```yaml
+---
+
+k3s_cluster:
+  hosts:
+    kube-0:
+      ansible_user: ansible
+      ansible_host: 10.10.9.2
+      ansible_python_interpreter: /usr/bin/python3
+    kube-1:
+      ansible_user: ansible
+      ansible_host: 10.10.9.3
+      ansible_python_interpreter: /usr/bin/python3
+    kube-2:
+      ansible_user: ansible
+      ansible_host: 10.10.9.4
+      ansible_python_interpreter: /usr/bin/python3
+
+```
+
+We can test this works with `ansible -i inventory.yml -m ping all`; expected
+result:
+
+```text
+kube-0 | SUCCESS => {
+    "changed": false,
+    "ping": "pong"
+}
+kube-1 | SUCCESS => {
+    "changed": false,
+    "ping": "pong"
+}
+kube-2 | SUCCESS => {
+    "changed": false,
+    "ping": "pong"
+}
+
+```
+
+## Playbook
+
+Here is our playbook for the k3s cluster (`cluster.yml`):
+
+```yaml
+---
+
+- name: Build a cluster with a single control node
+  hosts: k3s_cluster
+  vars:
+    k3s_become_for_all: true
+  roles:
+    - role: xanmanning.k3s
+```
+
+## Execution
+
+To execute the playbook against our inventory file, we will run the following
+command:
+
+`ansible-playbook -i inventory.yml cluster.yml`
+
+The output we can expect is similar to the below, with no failed or unreachable
+nodes. The default behavior of this role is to delegate the first play host as
+the control node, so kube-0 will have more changed tasks than others:
+
+```text
+PLAY RECAP *******************************************************************************************************
+kube-0 : ok=56 changed=11 unreachable=0 failed=0 skipped=28 rescued=0 ignored=0
+kube-1 : ok=43 changed=10 unreachable=0 failed=0 skipped=32 rescued=0 ignored=0
+kube-2 : ok=43 changed=10 unreachable=0 failed=0 skipped=32 rescued=0 ignored=0
+```
+
+## Testing
+
+After logging into kube-0, we can test that k3s is running across the cluster,
+that all nodes are ready and that everything is ready to execute our Kubernetes
+workloads by running the following:
+
+ - `sudo kubectl get nodes -o wide`
+ - `sudo kubectl get pods -o wide --all-namespaces`
+
+:hand: Note we are using `sudo` because we need to be root to access the
+kube config for this node. This behavior can be changed by specifying
+`write-kubeconfig-mode: 0644` in `k3s_server`.
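+
+For example, a minimal sketch of relaxing the kubeconfig permissions via the
+playbook (the `k3s_server` mapping is passed through by this role to the k3s
+server configuration):
+
+```yaml
+---
+
+- name: Build a cluster with a single control node
+  hosts: k3s_cluster
+  vars:
+    k3s_become_for_all: true
+    k3s_server:
+      write-kubeconfig-mode: "0644"
+  roles:
+    - role: xanmanning.k3s
+```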
+
+**Get Nodes**:
+
+```text
+ansible@kube-0:~$ sudo kubectl get nodes -o wide
+NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
+kube-0 Ready master <none> 34s v1.19.4+k3s1 10.0.2.15 <none> Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1
+kube-2 Ready <none> 14s v1.19.4+k3s1 10.0.2.17 <none> Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1
+kube-1 Ready <none> 14s v1.19.4+k3s1 10.0.2.16 <none> Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1
+ansible@kube-0:~$
+```
+
+**Get Pods**:
+
+```text
+ansible@kube-0:~$ sudo kubectl get pods -o wide --all-namespaces
+NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+kube-system local-path-provisioner-7ff9579c6-72j8x 1/1 Running 0 55s 10.42.2.2 kube-1 <none> <none>
+kube-system metrics-server-7b4f8b595-lkspj 1/1 Running 0 55s 10.42.1.2 kube-2 <none> <none>
+kube-system helm-install-traefik-b6vnt 0/1 Completed 0 55s 10.42.0.3 kube-0 <none> <none>
+kube-system coredns-66c464876b-llsh7 1/1 Running 0 55s 10.42.0.2 kube-0 <none> <none>
+kube-system svclb-traefik-jrqg7 2/2 Running 0 27s 10.42.1.3 kube-2 <none> <none>
+kube-system svclb-traefik-gh65q 2/2 Running 0 27s 10.42.0.4 kube-0 <none> <none>
+kube-system svclb-traefik-5z7zp 2/2 Running 0 27s 10.42.2.3 kube-1 <none> <none>
+kube-system traefik-5dd496474-l2k74 1/1 Running 0 27s 10.42.1.4 kube-2 <none> <none>
+```
diff --git a/server/ansible/roles/xanmanning.k3s/documentation/quickstart-ha-cluster.md b/server/ansible/roles/xanmanning.k3s/documentation/quickstart-ha-cluster.md
new file mode 100644
index 000000000..1b0291fcc
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/documentation/quickstart-ha-cluster.md
@@ -0,0 +1,154 @@
+# Quickstart: K3s cluster with an HA control plane using embedded etcd
+
+This is the quickstart guide to creating your own 3-node k3s cluster with a
+highly available control plane using the embedded etcd datastore.
+The control plane nodes will all be workers as well.
+
+:hand: This example requires your Ansible user to be able to connect to the
+servers over SSH using key-based authentication. The user also needs an entry
+in a sudoers file that allows privilege escalation without requiring a
+password.
+
+To test this is the case, run the following check, replacing `<ansible_user>`
+and `<server_name>`. The expected output is `Works`.
+
+`ssh <ansible_user>@<server_name> 'sudo cat /etc/shadow >/dev/null && echo "Works"'`
+
+For example:
+
+```text
+[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"'
+Works
+[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $
+```
+
+## Directory structure
+
+Our working directory will have the following files:
+
+```text
+kubernetes-playground/
+    |_ inventory.yml
+    |_ ha_cluster.yml
+```
+
+## Inventory
+
+Here's a YAML-based example inventory for our servers called `inventory.yml`:
+
+```yaml
+---
+
+# We're adding k3s_control_node to each host. This can be done in host_vars/
+# or group_vars/ as well, but for simplicity we are setting it here.
+k3s_cluster:
+  hosts:
+    kube-0:
+      ansible_user: ansible
+      ansible_host: 10.10.9.2
+      ansible_python_interpreter: /usr/bin/python3
+      k3s_control_node: true
+    kube-1:
+      ansible_user: ansible
+      ansible_host: 10.10.9.3
+      ansible_python_interpreter: /usr/bin/python3
+      k3s_control_node: true
+    kube-2:
+      ansible_user: ansible
+      ansible_host: 10.10.9.4
+      ansible_python_interpreter: /usr/bin/python3
+      k3s_control_node: true
+
+```
+
+We can test this works with `ansible -i inventory.yml -m ping all`; expected
+result:
+
+```text
+kube-0 | SUCCESS => {
+    "changed": false,
+    "ping": "pong"
+}
+kube-1 | SUCCESS => {
+    "changed": false,
+    "ping": "pong"
+}
+kube-2 | SUCCESS => {
+    "changed": false,
+    "ping": "pong"
+}
+
+```
+
+## Playbook
+
+Here is our playbook for the k3s cluster (`ha_cluster.yml`):
+
+```yaml
+---
+
+- name: Build a cluster with HA control plane
+  hosts: k3s_cluster
+  vars:
+    k3s_become_for_all: true
+    k3s_etcd_datastore: true
+    k3s_use_experimental: true  # Note this is required for k3s < v1.19.5+k3s1
+  roles:
+    - role: xanmanning.k3s
+```
+
+## Execution
+
+To execute the playbook against our inventory file, we will run the following
+command:
+
+`ansible-playbook -i inventory.yml ha_cluster.yml`
+
+The output we can expect is similar to the below, with no failed or unreachable
+nodes. The default behavior of this role is to delegate the first play host as
+the primary control node, so kube-0 will have more changed tasks than others:
+
+```text
+PLAY RECAP *******************************************************************************************************
+kube-0 : ok=53 changed=8 unreachable=0 failed=0 skipped=30 rescued=0 ignored=0
+kube-1 : ok=47 changed=10 unreachable=0 failed=0 skipped=28 rescued=0 ignored=0
+kube-2 : ok=47 changed=9 unreachable=0 failed=0 skipped=28 rescued=0 ignored=0
+```
+
+## Testing
+
+After logging into any of the servers (it doesn't matter which), we can test
+that k3s is running across the cluster, that all nodes are ready and that
+everything is ready to execute our Kubernetes workloads by running the
+following:
+
+ - `sudo kubectl get nodes -o wide`
+ - `sudo kubectl get pods -o wide --all-namespaces`
+
+:hand: Note we are using `sudo` because we need to be root to access the
+kube config for this node. This behavior can be changed by specifying
+`write-kubeconfig-mode: 0644` in `k3s_server`.
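+
+As an aside: in this example every control plane node also runs workloads. If
+you would rather dedicate the control plane, the `k3s_server` mapping accepts
+further k3s server flags, for example a node taint (a sketch, mirroring the
+`nodeploy` molecule scenario shipped with this role):
+
+```yaml
+k3s_server:
+  node-taint:
+    - "k3s-controlplane=true:NoExecute"
+```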
+
+**Get Nodes**:
+
+```text
+ansible@kube-0:~$ sudo kubectl get nodes -o wide
+NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
+kube-0 Ready etcd,master 2m58s v1.19.4+k3s1 10.10.9.2 10.10.9.2 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1
+kube-1 Ready etcd,master 2m22s v1.19.4+k3s1 10.10.9.3 10.10.9.3 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1
+kube-2 Ready etcd,master 2m10s v1.19.4+k3s1 10.10.9.4 10.10.9.4 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1
+```
+
+**Get Pods**:
+
+```text
+ansible@kube-0:~$ sudo kubectl get pods -o wide --all-namespaces
+NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+kube-system coredns-66c464876b-rhgn6 1/1 Running 0 3m38s 10.42.0.2 kube-0 <none> <none>
+kube-system helm-install-traefik-vwglv 0/1 Completed 0 3m39s 10.42.0.3 kube-0 <none> <none>
+kube-system local-path-provisioner-7ff9579c6-d5xpb 1/1 Running 0 3m38s 10.42.0.5 kube-0 <none> <none>
+kube-system metrics-server-7b4f8b595-nhbt8 1/1 Running 0 3m38s 10.42.0.4 kube-0 <none> <none>
+kube-system svclb-traefik-9lzcq 2/2 Running 0 2m56s 10.42.1.2 kube-1 <none> <none>
+kube-system svclb-traefik-vq487 2/2 Running 0 2m45s 10.42.2.2 kube-2 <none> <none>
+kube-system svclb-traefik-wkwkk 2/2 Running 0 3m1s 10.42.0.7 kube-0 <none> <none>
+kube-system traefik-5dd496474-lw6x8 1/1 Running 0 3m1s 10.42.0.6 kube-0 <none> <none>
+```
diff --git a/server/ansible/roles/xanmanning.k3s/documentation/quickstart-single-node.md b/server/ansible/roles/xanmanning.k3s/documentation/quickstart-single-node.md
new file mode 100644
index 000000000..959708306
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/documentation/quickstart-single-node.md
@@ -0,0 +1,121 @@
+# Quickstart: K3s single node
+
+This is the quickstart guide to creating your own single-node k3s "cluster".
+
+:hand: This example requires your Ansible user to be able to connect to the
+server over SSH using key-based authentication. The user also needs an entry
+in a sudoers file that allows privilege escalation without requiring a
+password.
+
+To test this is the case, run the following check, replacing `<ansible_user>`
+and `<server_name>`.
The expected output is `Works`.
+
+`ssh <ansible_user>@<server_name> 'sudo cat /etc/shadow >/dev/null && echo "Works"'`
+
+For example:
+
+```text
+[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"'
+Works
+[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $
+```
+
+## Directory structure
+
+Our working directory will have the following files:
+
+```text
+kubernetes-playground/
+    |_ inventory.yml
+    |_ single_node.yml
+```
+
+## Inventory
+
+Here's a YAML-based example inventory for our server called `inventory.yml`:
+
+```yaml
+---
+
+k3s_cluster:
+  hosts:
+    kube-0:
+      ansible_user: ansible
+      ansible_host: 10.10.9.2
+      ansible_python_interpreter: /usr/bin/python3
+
+```
+
+We can test this works with `ansible -i inventory.yml -m ping all`; expected
+result:
+
+```text
+kube-0 | SUCCESS => {
+    "changed": false,
+    "ping": "pong"
+}
+```
+
+## Playbook
+
+Here is our playbook for a single-node k3s cluster (`single_node.yml`):
+
+```yaml
+---
+
+- name: Build a single node k3s cluster
+  hosts: kube-0
+  vars:
+    k3s_become_for_all: true
+  roles:
+    - role: xanmanning.k3s
+```
+
+## Execution
+
+To execute the playbook against our inventory file, we will run the following
+command:
+
+`ansible-playbook -i inventory.yml single_node.yml`
+
+The output we can expect is similar to the below, with no failed or unreachable
+nodes:
+
+```text
+PLAY RECAP *******************************************************************************************************
+kube-0 : ok=39 changed=8 unreachable=0 failed=0 skipped=39 rescued=0 ignored=0
+```
+
+## Testing
+
+After logging into the server, we can test that k3s is running and that it is
+ready to execute our Kubernetes workloads by running the following:
+
+ - `sudo kubectl get nodes`
+ - `sudo kubectl get pods -o wide --all-namespaces`
+
+:hand: Note we are using `sudo` because we need to be root to access the
+kube config for this node. This behavior can be changed by specifying
+`write-kubeconfig-mode: 0644` in `k3s_server`.
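+
+If you would rather work from your own workstation, one option (a sketch,
+assuming the default kubeconfig location of `/etc/rancher/k3s/k3s.yaml`) is
+to pull the file down with an ad-hoc command such as
+`ansible -i inventory.yml -m ansible.builtin.fetch -a 'src=/etc/rancher/k3s/k3s.yaml dest=k3s.yaml flat=true' kube-0 --become`,
+then point the `server:` address in the fetched file at your node rather than
+`127.0.0.1`.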
+
+**Get Nodes**:
+
+```text
+ansible@kube-0:~$ sudo kubectl get nodes
+NAME STATUS ROLES AGE VERSION
+kube-0 Ready master 5m27s v1.19.4+k3s1
+ansible@kube-0:~$
+```
+
+**Get Pods**:
+
+```text
+ansible@kube-0:~$ sudo kubectl get pods --all-namespaces -o wide
+NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+kube-system metrics-server-7b4f8b595-k692h 1/1 Running 0 9m38s 10.42.0.2 kube-0 <none> <none>
+kube-system local-path-provisioner-7ff9579c6-5lgzb 1/1 Running 0 9m38s 10.42.0.3 kube-0 <none> <none>
+kube-system coredns-66c464876b-xg42q 1/1 Running 0 9m38s 10.42.0.5 kube-0 <none> <none>
+kube-system helm-install-traefik-tdpcs 0/1 Completed 0 9m38s 10.42.0.4 kube-0 <none> <none>
+kube-system svclb-traefik-hk248 2/2 Running 0 9m4s 10.42.0.7 kube-0 <none> <none>
+kube-system traefik-5dd496474-bf4kv 1/1 Running 0 9m4s 10.42.0.6 kube-0 <none> <none>
+```
diff --git a/server/ansible/roles/xanmanning.k3s/handlers/main.yml b/server/ansible/roles/xanmanning.k3s/handlers/main.yml
new file mode 100644
index 000000000..2dd40eb46
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/handlers/main.yml
@@ -0,0 +1,32 @@
+---
+
+- name: reload systemd
+  ansible.builtin.systemd:
+    daemon_reload: true
+    scope: "{{ k3s_systemd_context }}"
+  become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}"
+
+- name: restart k3s
+  ansible.builtin.systemd:
+    name: k3s
+    state: restarted
+    scope: "{{ k3s_systemd_context }}"
+    enabled: "{{ k3s_start_on_boot }}"
+  retries: 3
+  delay: 3
+  register: k3s_systemd_restart_k3s
+  failed_when:
+    - k3s_systemd_restart_k3s is not success
+    - not ansible_check_mode
+  become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}"
+
+- name: restart docker
+  ansible.builtin.systemd:
+    name: docker
+    state: restarted
+    enabled: true
+  register: k3s_systemd_restart_docker
+  failed_when:
+    - k3s_systemd_restart_docker is not success
+    - not ansible_check_mode
+  become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}"
diff --git a/server/ansible/roles/xanmanning.k3s/meta/.galaxy_install_info b/server/ansible/roles/xanmanning.k3s/meta/.galaxy_install_info
new file mode 100644
index 000000000..387769fa4
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/meta/.galaxy_install_info
@@ -0,0 +1,2 @@
+install_date: Sat Mar 20 23:31:38 2021
+version: v2.8.0
diff --git a/server/ansible/roles/xanmanning.k3s/meta/main.yml b/server/ansible/roles/xanmanning.k3s/meta/main.yml
new file mode 100644
index 000000000..d276bf000
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/meta/main.yml
@@ -0,0 +1,83 @@
+---
+
+galaxy_info:
+  role_name: k3s
+  namespace: xanmanning
+  author: Xan Manning
+  description: Ansible role for installing k3s as either a standalone server or HA cluster
+  company: Pyrat Ltd.
+  github_branch: main
+
+  # If the issue tracker for your role is not on github, uncomment the
+  # next line and provide a value
+  # issue_tracker_url: http://example.com/issue/tracker
+
+  # Some suggested licenses:
+  # - BSD (default)
+  # - MIT
+  # - GPLv2
+  # - GPLv3
+  # - Apache
+  # - CC-BY
+  license: BSD
+
+  min_ansible_version: '2.9'
+
+  # If this a Container Enabled role, provide the minimum Ansible Container version.
+  # min_ansible_container_version:
+
+  # Optionally specify the branch Galaxy will use when accessing the GitHub
+  # repo for this role. During role install, if no tags are available,
+  # Galaxy will use this branch. During import Galaxy will access files on
+  # this branch.
If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually main) will be used. + # github_branch: + + # + # platforms is a list of platforms, and each platform has a name and a list of versions. + # + platforms: + - name: Archlinux + versions: + - all + - name: EL + versions: + - 7 + - 8 + - name: Amazon + - name: Fedora + versions: + - 29 + - 30 + - 31 + - name: Debian + versions: + - buster + - jessie + - stretch + - name: SLES + versions: + - 15 + - name: Ubuntu + versions: + - xenial + - bionic + + galaxy_tags: + - k3s + - k8s + - kubernetes + - containerd + - cluster + - lightweight + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] +# List your role dependencies here, one per line. Be sure to remove the '[]' above, +# if you add dependencies to this list. diff --git a/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/converge.yml b/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/converge.yml new file mode 100644 index 000000000..3418dd02b --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/converge.yml @@ -0,0 +1,16 @@ +--- +- name: Converge + hosts: node* + become: true + vars: + molecule_is_test: true + k3s_build_cluster: false + k3s_install_dir: /opt/k3s/bin + k3s_config_file: /opt/k3s/etc/k3s.yaml + k3s_server: + data-dir: /var/lib/k3s-io + default-local-storage-path: /var/lib/k3s-io/local-storage + k3s_server_manifests_templates: + - "molecule/autodeploy/templates/00-ns-monitoring.yml.j2" + roles: + - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" diff --git a/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/molecule.yml b/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/molecule.yml new file mode 100644 index 000000000..ecd9981a8 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/molecule.yml @@ -0,0 +1,44 @@ +--- + +dependency: + name: galaxy +driver: + name: docker +lint: | + set -e + yamllint -s . 
+ ansible-lint --exclude molecule/ +platforms: + - name: node1 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: node2 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: node3 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet +provisioner: + name: ansible + options: + verbose: true +verifier: + name: ansible diff --git a/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/prepare.yml b/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/prepare.yml new file mode 100644 index 000000000..470bb8891 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/prepare.yml @@ -0,0 +1,9 @@ +--- +- name: Prepare + hosts: node* + become: true + tasks: + - name: Ensure apt cache is updated + ansible.builtin.apt: + update_cache: true + when: ansible_pkg_mgr == 'apt' diff --git a/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/templates/00-ns-monitoring.yml.j2 b/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/templates/00-ns-monitoring.yml.j2 new file mode 100644 index 000000000..d32523606 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/templates/00-ns-monitoring.yml.j2 @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: monitoring diff --git a/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/verify.yml b/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/verify.yml new file mode 100644 index 000000000..86afba4ff --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/autodeploy/verify.yml @@ -0,0 +1,9 @@ +--- +# This is an example playbook to execute Ansible tests. + +- name: Verify + hosts: all + tasks: + - name: Example assertion + ansible.builtin.assert: + that: true diff --git a/server/ansible/roles/xanmanning.k3s/molecule/debug/converge.yml b/server/ansible/roles/xanmanning.k3s/molecule/debug/converge.yml new file mode 100644 index 000000000..086e26953 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/debug/converge.yml @@ -0,0 +1,12 @@ +--- +- name: Converge + hosts: all + become: true + vars: + pyratlabs_issue_controller_dump: true + pre_tasks: + - name: Ensure k3s_debug is set + ansible.builtin.set_fact: + k3s_debug: true + roles: + - xanmanning.k3s diff --git a/server/ansible/roles/xanmanning.k3s/molecule/debug/molecule.yml b/server/ansible/roles/xanmanning.k3s/molecule/debug/molecule.yml new file mode 100644 index 000000000..ecd9981a8 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/debug/molecule.yml @@ -0,0 +1,44 @@ +--- + +dependency: + name: galaxy +driver: + name: docker +lint: | + set -e + yamllint -s . 
+ ansible-lint --exclude molecule/ +platforms: + - name: node1 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: node2 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: node3 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet +provisioner: + name: ansible + options: + verbose: true +verifier: + name: ansible diff --git a/server/ansible/roles/xanmanning.k3s/molecule/debug/prepare.yml b/server/ansible/roles/xanmanning.k3s/molecule/debug/prepare.yml new file mode 100644 index 000000000..2a1a341e8 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/debug/prepare.yml @@ -0,0 +1,8 @@ +--- +- name: Prepare + hosts: all + tasks: + - name: Ensure apt cache is updated + ansible.builtin.apt: + update_cache: true + when: ansible_pkg_mgr == 'apt' diff --git a/server/ansible/roles/xanmanning.k3s/molecule/debug/verify.yml b/server/ansible/roles/xanmanning.k3s/molecule/debug/verify.yml new file mode 100644 index 000000000..86afba4ff --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/debug/verify.yml @@ -0,0 +1,9 @@ +--- +# This is an example playbook to execute Ansible tests. + +- name: Verify + hosts: all + tasks: + - name: Example assertion + ansible.builtin.assert: + that: true diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/Dockerfile.j2 b/server/ansible/roles/xanmanning.k3s/molecule/default/Dockerfile.j2 new file mode 100644 index 000000000..99061d7e4 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/default/Dockerfile.j2 @@ -0,0 +1,26 @@ +# Molecule managed + +{% if item.registry is defined %} +FROM {{ item.registry.url }}/{{ item.image }} +{% else %} +FROM {{ item.image }} +{% endif %} + +RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python systemd sudo bash ca-certificates && apt-get clean; \ + elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python systemd sudo python-devel python*-dnf bash && dnf clean all; \ + elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python systemd sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ + elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python systemd sudo bash python-xml && zypper clean -a; \ + elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo systemd bash ca-certificates; \ + elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python systemd sudo bash ca-certificates && xbps-remove -O; fi + +RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \ + rm -f /lib/systemd/system/multi-user.target.wants/*; \ + rm -f /etc/systemd/system/*.wants/*; \ + rm -f /lib/systemd/system/local-fs.target.wants/*; \ + rm -f /lib/systemd/system/sockets.target.wants/*udev*; \ + rm -f 
/lib/systemd/system/sockets.target.wants/*initctl*; \
+    rm -f /lib/systemd/system/basic.target.wants/*; \
+    rm -f /lib/systemd/system/anaconda.target.wants/*;
+
+VOLUME ["/sys/fs/cgroup"]
+CMD ["/usr/sbin/init"]
diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/INSTALL.rst b/server/ansible/roles/xanmanning.k3s/molecule/default/INSTALL.rst
new file mode 100644
index 000000000..6a44bde9e
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/molecule/default/INSTALL.rst
@@ -0,0 +1,22 @@
+*******
+Docker driver installation guide
+*******
+
+Requirements
+============
+
+* Docker Engine
+
+Install
+=======
+
+Please refer to the `Virtual environment`_ documentation for installation best
+practices. If not using a virtual environment, please consider passing the
+widely recommended `'--user' flag`_ when invoking ``pip``.
+
+.. _Virtual environment: https://virtualenv.pypa.io/en/latest/
+.. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site
+
+.. code-block:: bash
+
+    $ pip install 'molecule[docker]'
diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/converge.yml b/server/ansible/roles/xanmanning.k3s/molecule/default/converge.yml
new file mode 100644
index 000000000..3bf3a366d
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/molecule/default/converge.yml
@@ -0,0 +1,9 @@
+---
+- name: Converge
+  hosts: all
+  become: true
+  vars:
+    molecule_is_test: true
+    k3s_install_hard_links: true
+  roles:
+    - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/molecule.yml b/server/ansible/roles/xanmanning.k3s/molecule/default/molecule.yml
new file mode 100644
index 000000000..ecd9981a8
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/molecule/default/molecule.yml
@@ -0,0 +1,44 @@
+---
+
+dependency:
+  name: galaxy
+driver:
+  name: docker
+lint: |
+  set -e
+  yamllint -s .
+ ansible-lint --exclude molecule/ +platforms: + - name: node1 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: node2 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: node3 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet +provisioner: + name: ansible + options: + verbose: true +verifier: + name: ansible diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-download.yml b/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-download.yml new file mode 100644 index 000000000..4635022c9 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-download.yml @@ -0,0 +1,9 @@ +--- +- name: Converge + hosts: all + become: true + vars: + molecule_is_test: true + k3s_state: downloaded + roles: + - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-restart-cluster.yml b/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-restart-cluster.yml new file mode 100644 index 000000000..33d06e85b --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-restart-cluster.yml @@ -0,0 +1,9 @@ +--- +- name: Converge + hosts: all + become: true + vars: + molecule_is_test: true + k3s_state: restarted + roles: + - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-rootless.yml b/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-rootless.yml new file mode 100644 index 000000000..5c0af5bc9 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-rootless.yml @@ -0,0 +1,15 @@ +--- +- name: Converge + hosts: node1 + become: true + become_user: k3suser + vars: + molecule_is_test: true + k3s_use_experimental: true + k3s_server: + rootless: true + k3s_agent: + rootless: true + k3s_install_dir: "/home/{{ ansible_user_id }}/bin" + roles: + - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-standalone.yml b/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-standalone.yml new file mode 100644 index 000000000..86938ed05 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-standalone.yml @@ -0,0 +1,9 @@ +--- +- name: Converge + hosts: all + become: true + vars: + molecule_is_test: true + k3s_build_cluster: false + roles: + - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-start-cluster.yml b/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-start-cluster.yml new file mode 100644 index 000000000..3c31263ea --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-start-cluster.yml @@ -0,0 +1,9 @@ +--- +- name: Converge + hosts: all 
+ become: true + vars: + molecule_is_test: true + k3s_state: started + roles: + - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-stop-cluster.yml b/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-stop-cluster.yml new file mode 100644 index 000000000..e736c1265 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-stop-cluster.yml @@ -0,0 +1,9 @@ +--- +- name: Converge + hosts: all + become: true + vars: + molecule_is_test: true + k3s_state: stopped + roles: + - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-uninstall-cluster.yml b/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-uninstall-cluster.yml new file mode 100644 index 000000000..c84ec57d0 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/default/playbook-uninstall-cluster.yml @@ -0,0 +1,9 @@ +--- +- name: Converge + hosts: all + become: true + vars: + molecule_is_test: true + k3s_state: uninstalled + roles: + - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/prepare-rootless.yml b/server/ansible/roles/xanmanning.k3s/molecule/default/prepare-rootless.yml new file mode 100644 index 000000000..d02d3e5f2 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/default/prepare-rootless.yml @@ -0,0 +1,23 @@ +--- +- name: Prepare + hosts: node1 + become: true + tasks: + - name: Ensure a user group exists + ansible.builtin.group: + name: user + state: present + + - name: Ensure a normal user exists + ansible.builtin.user: + name: k3suser + group: user + state: present + + - name: Ensure a normal user has bin directory + ansible.builtin.file: + path: /home/k3suser/bin + state: directory + owner: k3suser + group: user + mode: 0700 diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/prepare.yml b/server/ansible/roles/xanmanning.k3s/molecule/default/prepare.yml new file mode 100644 index 000000000..2a1a341e8 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/default/prepare.yml @@ -0,0 +1,8 @@ +--- +- name: Prepare + hosts: all + tasks: + - name: Ensure apt cache is updated + ansible.builtin.apt: + update_cache: true + when: ansible_pkg_mgr == 'apt' diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/tests/test_default.py b/server/ansible/roles/xanmanning.k3s/molecule/default/tests/test_default.py new file mode 100644 index 000000000..eedd64a1d --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/default/tests/test_default.py @@ -0,0 +1,14 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_hosts_file(host): + f = host.file('/etc/hosts') + + assert f.exists + assert f.user == 'root' + assert f.group == 'root' diff --git a/server/ansible/roles/xanmanning.k3s/molecule/default/tests/test_default.pyc b/server/ansible/roles/xanmanning.k3s/molecule/default/tests/test_default.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28c2e2d42688dc5ec97e4aa0c0223353ad18569d GIT binary patch literal 1025 zcmdr~&2H2%5FRJnUs3jiP%k~_(#@gbh7eViszu9I+5!?Qp(st9)>*F|*^?DjyBFZ$ z;3aqo9-tjNoBqB5`5BMDjK7)rCgH!W&A-3HZ#gWlkk zB^iA>8GlU=kB14m^qG`7qcvE*2ejHDnQKbrE20Do2G^92f!zTN;77X+T_Mu|?A`;g 
z*MUb%klH$pT&9uTBpY+g;|p!kVvn{)YxL#%T`45TRf73<;UZp=yKgNwD6~q9(Z+{0 z4sku68{O2AE8XLc7F{!CL!rdcZIS@9q&%%g_ysIXIk>Sk^J7twVn#X z5V?tM&Q)fFj_2HnQV@dNqEMyD;;XBNp@WVoErj7YYIC_*JdNq-QkBj4^Uqz9M-(n$ zTJSP!Bo69J>{B-$^Ae^`unmIjIvHU7pwC{h$mvXD0w6thoZnSFXi!Ky$dnallBcFo zij;H&cOhO^ALiA661qj9`|as*dw7iyuI9pM<%!NYrtNsUQoUX+Hg=WW&*py|kQ>C? UnJ(zBdgp#cKbaQ+-E2F&1#loHZU6uP literal 0 HcmV?d00001 diff --git a/server/ansible/roles/xanmanning.k3s/molecule/docker/converge.yml b/server/ansible/roles/xanmanning.k3s/molecule/docker/converge.yml new file mode 100644 index 000000000..917ecbbaa --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/docker/converge.yml @@ -0,0 +1,13 @@ +--- +- name: Converge + hosts: all + become: true + vars: + molecule_is_test: true + k3s_server: + https-listen-port: 26443 + cluster-domain: examplecluster.local + k3s_agent: + docker: true + roles: + - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" diff --git a/server/ansible/roles/xanmanning.k3s/molecule/docker/molecule.yml b/server/ansible/roles/xanmanning.k3s/molecule/docker/molecule.yml new file mode 100644 index 000000000..0262e2fa7 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/docker/molecule.yml @@ -0,0 +1,44 @@ +--- + +dependency: + name: galaxy +driver: + name: docker +lint: | + set -e + yamllint -s . + ansible-lint --exclude molecule/ +platforms: + - name: node1 + image: "${MOLECULE_DISTRO:-geerlingguy/docker-ubuntu2004-ansible:latest}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: node2 + image: "${MOLECULE_DISTRO:-geerlingguy/docker-ubuntu2004-ansible:latest}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: node3 + image: "${MOLECULE_DISTRO:-geerlingguy/docker-ubuntu2004-ansible:latest}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet +provisioner: + name: ansible + options: + verbose: true +verifier: + name: ansible diff --git a/server/ansible/roles/xanmanning.k3s/molecule/docker/prepare.yml b/server/ansible/roles/xanmanning.k3s/molecule/docker/prepare.yml new file mode 100644 index 000000000..2a1a341e8 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/docker/prepare.yml @@ -0,0 +1,8 @@ +--- +- name: Prepare + hosts: all + tasks: + - name: Ensure apt cache is updated + ansible.builtin.apt: + update_cache: true + when: ansible_pkg_mgr == 'apt' diff --git a/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/Dockerfile.j2 b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/Dockerfile.j2 new file mode 100644 index 000000000..36166d64e --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/Dockerfile.j2 @@ -0,0 +1,7 @@ +# Molecule managed + +{% if item.registry is defined %} +FROM {{ item.registry.url }}/{{ item.image }} +{% else %} +FROM {{ item.image }} +{% endif %} diff --git a/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/INSTALL.rst b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/INSTALL.rst new file mode 100644 index 000000000..6a44bde9e --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/INSTALL.rst @@ -0,0 +1,22 
@@ +******* +Docker driver installation guide +******* + +Requirements +============ + +* Docker Engine + +Install +======= + +Please refer to the `Virtual environment`_ documentation for installation best +practices. If not using a virtual environment, please consider passing the +widely recommended `'--user' flag`_ when invoking ``pip``. + +.. _Virtual environment: https://virtualenv.pypa.io/en/latest/ +.. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site + +.. code-block:: bash + + $ pip install 'molecule[docker]' diff --git a/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/converge.yml b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/converge.yml new file mode 100644 index 000000000..a3eb262aa --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/converge.yml @@ -0,0 +1,17 @@ +--- + +- name: Converge + hosts: node* + become: true + vars: + molecule_is_test: true + k3s_registration_address: loadbalancer + k3s_server: + datastore-endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable" + pre_tasks: + - name: Set each node to be a control node + ansible.builtin.set_fact: + k3s_control_node: true + when: inventory_hostname in ['node2', 'node3'] + roles: + - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" diff --git a/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/haproxy-loadbalancer.conf.j2 b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/haproxy-loadbalancer.conf.j2 new file mode 100644 index 000000000..78fe9d3bf --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/haproxy-loadbalancer.conf.j2 @@ -0,0 +1,13 @@ +frontend loadbalancer + bind *:6443 + mode tcp + default_backend control_nodes + timeout client 1m + +backend control_nodes + mode tcp + balance roundrobin + server node2 node2:6443 + server node3 node3:6443 + timeout connect 30s + timeout server 30m diff --git a/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/molecule.yml b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/molecule.yml new file mode 100644 index 000000000..d388fcad8 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/molecule.yml @@ -0,0 +1,57 @@ +--- + +dependency: + name: galaxy +driver: + name: docker +lint: | + set -e + yamllint -s . 
+ ansible-lint --exclude molecule/ +platforms: + - name: node1 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: node2 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: node3 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: database + image: postgres:11-alpine + pre_build_image: true + command: "postgres" + env: + POSTGRES_PASSWORD: "verybadpass" + networks: + - name: k3snet + - name: loadbalancer + image: geerlingguy/docker-centos8-ansible:latest + pre_build_image: true + ports: + - "6443:6443" + networks: + - name: k3snet +provisioner: + name: ansible + options: + verbose: true diff --git a/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/prepare.yml b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/prepare.yml new file mode 100644 index 000000000..5b318a5e2 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/prepare.yml @@ -0,0 +1,38 @@ +--- +- name: Prepare Load Balancer + hosts: loadbalancer + tasks: + - name: Ensure apt cache is updated + ansible.builtin.apt: + update_cache: true + when: ansible_pkg_mgr == 'apt' + + - name: Ensure HAProxy is installed + ansible.builtin.package: + name: haproxy + state: present + + - name: Ensure HAProxy config directory exists + ansible.builtin.file: + path: /usr/local/etc/haproxy + state: directory + mode: 0755 + + - name: Ensure HAProxy is configured + ansible.builtin.template: + src: haproxy-loadbalancer.conf.j2 + dest: /usr/local/etc/haproxy/haproxy.cfg + mode: 0644 + + - name: Ensure HAProxy service is started + command: haproxy -D -f /usr/local/etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid + args: + creates: /var/run/haproxy.pid + +- name: Prepare nodes + hosts: node* + tasks: + - name: Ensure apt cache is updated + ansible.builtin.apt: + update_cache: true + when: ansible_pkg_mgr == 'apt' diff --git a/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/tests/test_default.py b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/tests/test_default.py new file mode 100644 index 000000000..eedd64a1d --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/tests/test_default.py @@ -0,0 +1,14 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_hosts_file(host): + f = host.file('/etc/hosts') + + assert f.exists + assert f.user == 'root' + assert f.group == 'root' diff --git a/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/tests/test_default.pyc b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilitydb/tests/test_default.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28c2e2d42688dc5ec97e4aa0c0223353ad18569d GIT binary patch literal 1025 zcmdr~&2H2%5FRJnUs3jiP%k~_(#@gbh7eViszu9I+5!?Qp(st9)>*F|*^?DjyBFZ$ 
z;3aqo9-tjNoBqB5`5BMDjK7)rCgH!W&A-3HZ#gWlkk zB^iA>8GlU=kB14m^qG`7qcvE*2ejHDnQKbrE20Do2G^92f!zTN;77X+T_Mu|?A`;g z*MUb%klH$pT&9uTBpY+g;|p!kVvn{)YxL#%T`45TRf73<;UZp=yKgNwD6~q9(Z+{0 z4sku68{O2AE8XLc7F{!CL!rdcZIS@9q&%%g_ysIXIk>Sk^J7twVn#X z5V?tM&Q)fFj_2HnQV@dNqEMyD;;XBNp@WVoErj7YYIC_*JdNq-QkBj4^Uqz9M-(n$ zTJSP!Bo69J>{B-$^Ae^`unmIjIvHU7pwC{h$mvXD0w6thoZnSFXi!Ky$dnallBcFo zij;H&cOhO^ALiA661qj9`|as*dw7iyuI9pM<%!NYrtNsUQoUX+Hg=WW&*py|kQ>C? UnJ(zBdgp#cKbaQ+-E2F&1#loHZU6uP literal 0 HcmV?d00001 diff --git a/server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/converge.yml b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/converge.yml new file mode 100644 index 000000000..ce2f302c6 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/converge.yml @@ -0,0 +1,17 @@ +--- + +- name: Converge + hosts: node* + become: true + vars: + molecule_is_test: true + k3s_use_experimental: true + k3s_etcd_datastore: true + k3s_server: + secrets-encryption: true + pre_tasks: + - name: Set each node to be a control node + ansible.builtin.set_fact: + k3s_control_node: true + roles: + - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" diff --git a/server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/haproxy-loadbalancer.conf.j2 b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/haproxy-loadbalancer.conf.j2 new file mode 100644 index 000000000..78fe9d3bf --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/haproxy-loadbalancer.conf.j2 @@ -0,0 +1,13 @@ +frontend loadbalancer + bind *:6443 + mode tcp + default_backend control_nodes + timeout client 1m + +backend control_nodes + mode tcp + balance roundrobin + server node2 node2:6443 + server node3 node3:6443 + timeout connect 30s + timeout server 30m diff --git a/server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/molecule.yml b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/molecule.yml new file mode 100644 index 000000000..1c597025a --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/molecule.yml @@ -0,0 +1,49 @@ +--- + +dependency: + name: galaxy +driver: + name: docker +lint: | + set -e + yamllint -s . 
+ ansible-lint --exclude molecule/ +platforms: + - name: node1 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: node2 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: node3 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: loadbalancer + image: geerlingguy/docker-centos8-ansible:latest + pre_build_image: true + ports: + - "6443:6443" + networks: + - name: k3snet +provisioner: + name: ansible + options: + verbose: true diff --git a/server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/prepare.yml b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/prepare.yml new file mode 100644 index 000000000..5b318a5e2 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/highavailabilityetcd/prepare.yml @@ -0,0 +1,38 @@ +--- +- name: Prepare Load Balancer + hosts: loadbalancer + tasks: + - name: Ensure apt cache is updated + ansible.builtin.apt: + update_cache: true + when: ansible_pkg_mgr == 'apt' + + - name: Ensure HAProxy is installed + ansible.builtin.package: + name: haproxy + state: present + + - name: Ensure HAProxy config directory exists + ansible.builtin.file: + path: /usr/local/etc/haproxy + state: directory + mode: 0755 + + - name: Ensure HAProxy is configured + ansible.builtin.template: + src: haproxy-loadbalancer.conf.j2 + dest: /usr/local/etc/haproxy/haproxy.cfg + mode: 0644 + + - name: Ensure HAProxy service is started + command: haproxy -D -f /usr/local/etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid + args: + creates: /var/run/haproxy.pid + +- name: Prepare nodes + hosts: node* + tasks: + - name: Ensure apt cache is updated + ansible.builtin.apt: + update_cache: true + when: ansible_pkg_mgr == 'apt' diff --git a/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/converge.yml b/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/converge.yml new file mode 100644 index 000000000..831fec78d --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/converge.yml @@ -0,0 +1,10 @@ +--- +- name: Converge + hosts: all + become: true + vars: + molecule_is_test: true + k3s_server: "{{ lookup('file', 'k3s_server.yml') | from_yaml }}" + k3s_agent: "{{ lookup('file', 'k3s_agent.yml') | from_yaml }}" + roles: + - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" diff --git a/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/k3s_agent.yml b/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/k3s_agent.yml new file mode 100644 index 000000000..58fa21e70 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/k3s_agent.yml @@ -0,0 +1,8 @@ +--- + +node-label: + - "foo=bar" + - "hello=world" +kubelet-arg: + - "cloud-provider=external" + - "provider-id=azure" diff --git a/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/k3s_server.yml b/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/k3s_server.yml new file mode 100644 index 000000000..0bc74c207 --- 
/dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/k3s_server.yml @@ -0,0 +1,14 @@ +--- + +flannel-backend: 'none' +disable-scheduler: true +disable-cloud-controller: true +disable-network-policy: true +disable: + - coredns + - traefik + - servicelb + - local-storage + - metrics-server +node-taint: + - "k3s-controlplane=true:NoExecute" diff --git a/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/molecule.yml b/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/molecule.yml new file mode 100644 index 000000000..ecd9981a8 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/molecule.yml @@ -0,0 +1,44 @@ +--- + +dependency: + name: galaxy +driver: + name: docker +lint: | + set -e + yamllint -s . + ansible-lint --exclude molecule/ +platforms: + - name: node1 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: node2 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet + - name: node3 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: ${MOLECULE_PREBUILT:-true} + networks: + - name: k3snet +provisioner: + name: ansible + options: + verbose: true +verifier: + name: ansible diff --git a/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/prepare.yml b/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/prepare.yml new file mode 100644 index 000000000..2a1a341e8 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/prepare.yml @@ -0,0 +1,8 @@ +--- +- name: Prepare + hosts: all + tasks: + - name: Ensure apt cache is updated + ansible.builtin.apt: + update_cache: true + when: ansible_pkg_mgr == 'apt' diff --git a/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/verify.yml b/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/verify.yml new file mode 100644 index 000000000..86afba4ff --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/nodeploy/verify.yml @@ -0,0 +1,9 @@ +--- +# This is an example playbook to execute Ansible tests. 
+ +- name: Verify + hosts: all + tasks: + - name: Example assertion + ansible.builtin.assert: + that: true diff --git a/server/ansible/roles/xanmanning.k3s/molecule/requirements.txt b/server/ansible/roles/xanmanning.k3s/molecule/requirements.txt new file mode 100644 index 000000000..41787de00 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/molecule/requirements.txt @@ -0,0 +1,6 @@ +-r ../requirements.txt + +molecule[docker]>=3.2 +docker>=4.3.1 +yamllint>=1.25.0 +ansible-lint>=4.3.5 diff --git a/server/ansible/roles/xanmanning.k3s/requirements.txt b/server/ansible/roles/xanmanning.k3s/requirements.txt new file mode 100644 index 000000000..4c1a3286f --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/requirements.txt @@ -0,0 +1 @@ +ansible>2.9.16,!=2.10.0,!=2.10.1,!=2.10.2,!=2.10.3 diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/configure-k3s-cluster.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/configure-k3s-cluster.yml new file mode 100644 index 000000000..5e3ed0f7d --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/configure-k3s-cluster.yml @@ -0,0 +1,86 @@ +--- + +- name: "Ensure cluster token is captured from {{ k3s_control_delegate }}" + ansible.builtin.slurp: + path: "{{ k3s_runtime_config['data-dir'] | default(k3s_data_dir) }}/server/token" + register: k3s_slurped_cluster_token + delegate_to: "{{ k3s_control_delegate }}" + when: + - k3s_control_token is not defined + - not ansible_check_mode + become: "{{ k3s_become_for_kubectl | ternary(true, false, k3s_become_for_all) }}" + +- name: Ensure cluster token is formatted correctly for use in templates + ansible.builtin.set_fact: + k3s_control_token: "{{ k3s_slurped_cluster_token.content | b64decode }}" + when: k3s_control_token is not defined and not ansible_check_mode + +- name: Ensure dummy cluster token is defined for ansible_check_mode + ansible.builtin.set_fact: + k3s_control_token: "{{ k3s_control_delegate | to_uuid }}" + check_mode: false + when: k3s_control_token is not defined and ansible_check_mode + +- name: Ensure the cluster token file location exists + ansible.builtin.file: + path: "{{ k3s_token_location | dirname }}" + state: directory + mode: 0755 + become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}" + +- name: Ensure k3s cluster token file is present on workers and secondary control nodes + ansible.builtin.template: + src: cluster-token.j2 + dest: "{{ k3s_token_location }}" + mode: 0600 + become: "{{ k3s_become_for_install_dir | ternary(true, false, k3s_become_for_all) }}" + when: (k3s_control_node and not k3s_primary_control_node) + or not k3s_control_node + notify: + - restart k3s + +- name: Ensure k3s service unit file is present + ansible.builtin.template: + src: k3s.service.j2 + dest: "{{ k3s_systemd_unit_dir }}/k3s.service" + mode: 0644 + become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}" + notify: + - reload systemd + - restart k3s + +- name: Ensure k3s config file exists + ansible.builtin.template: + src: config.yaml.j2 + dest: "{{ k3s_config_file }}" + mode: 0644 + notify: + - reload systemd + - restart k3s + become: "{{ k3s_become_for_install_dir | ternary(true, false, k3s_become_for_all) }}" + +- name: Ensure secondary controllers are started + ansible.builtin.systemd: + name: k3s + state: started + enabled: "{{ k3s_start_on_boot }}" + register: ensure_secondary_controllers_started + failed_when: + - ensure_secondary_controllers_started is not succeeded + - not ansible_check_mode + until: 
ensure_secondary_controllers_started is succeeded + retries: "{{ ansible_play_hosts_all | length }}" + delay: 5 + when: + - k3s_control_node + - not k3s_primary_control_node + become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}" + +- import_tasks: ../validate/state/control-plane.yml + when: not k3s_skip_validation + +- name: Flush Handlers + meta: flush_handlers + +- import_tasks: ../validate/state/nodes.yml + when: not k3s_skip_validation diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/docker/amazon/install.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/amazon/install.yml new file mode 100644 index 000000000..37f5ef4ee --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/amazon/install.yml @@ -0,0 +1,12 @@ +--- + +- name: Ensure docker is installed using amazon-linux-extras + ansible.builtin.command: amazon-linux-extras install docker + args: + creates: /etc/docker + notify: + - restart docker + become: "{{ k3s_become_for_package_install | ternary(true, false, k3s_become_for_all) }}" + +- name: Flush Handlers + meta: flush_handlers diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/docker/archlinux/install-prerequisites.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/archlinux/install-prerequisites.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/archlinux/install-prerequisites.yml @@ -0,0 +1 @@ +--- diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/docker/archlinux/install.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/archlinux/install.yml new file mode 100644 index 000000000..5e63d6cfe --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/archlinux/install.yml @@ -0,0 +1,16 @@ +--- + +- name: Ensure docker is installed using Pacman + community.general.pacman: + name: docker + state: present + register: ensure_docker_prerequisites_installed + until: ensure_docker_prerequisites_installed is succeeded + retries: 3 + delay: 10 + notify: + - restart docker + become: "{{ k3s_become_for_package_install | ternary(true, false, k3s_become_for_all) }}" + +- name: Flush Handlers + meta: flush_handlers diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/docker/debian/install-prerequisites.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/debian/install-prerequisites.yml new file mode 100644 index 000000000..b60ae5cb9 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/debian/install-prerequisites.yml @@ -0,0 +1,29 @@ +--- + +- name: Ensure Docker prerequisites are installed + ansible.builtin.apt: + name: + - apt-transport-https + - ca-certificates + - curl + - "{{ 'gnupg2' if ansible_distribution == 'Debian' else 'gnupg-agent' }}" + - software-properties-common + state: present + register: ensure_docker_prerequisites_installed + until: ensure_docker_prerequisites_installed is succeeded + retries: 3 + delay: 10 + become: "{{ k3s_become_for_package_install | ternary(true, false, k3s_become_for_all) }}" + +- name: Ensure Docker APT key is present + ansible.builtin.apt_key: + url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg + state: present + become: "{{ k3s_become_for_package_install | ternary(true, false, k3s_become_for_all) }}" + +- name: Ensure Docker repository is installed and configured + ansible.builtin.apt_repository: + filename: docker-ce + repo: "deb https://download.docker.com/linux/{{ 
ansible_distribution | lower }} {{ ansible_distribution_release }} stable" + update_cache: true + become: "{{ k3s_become_for_package_install | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/docker/install.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/install.yml new file mode 100644 index 000000000..27fe28748 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/install.yml @@ -0,0 +1,16 @@ +--- + +- name: Ensure docker is installed + ansible.builtin.package: + name: + - docker-ce + - docker-ce-cli + - containerd.io + state: present + register: ensure_docker_installed + until: ensure_docker_installed is succeeded + retries: 3 + delay: 10 + notify: + - restart docker + become: "{{ k3s_become_for_package_install | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/docker/opensuse-leap/install.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/opensuse-leap/install.yml new file mode 100644 index 000000000..91b923d86 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/opensuse-leap/install.yml @@ -0,0 +1,16 @@ +--- + +- name: Ensure docker is installed using Zypper + community.general.zypper: + name: docker + state: present + register: ensure_docker_prerequisites_installed + until: ensure_docker_prerequisites_installed is succeeded + retries: 3 + delay: 10 + notify: + - restart docker + become: "{{ k3s_become_for_package_install | ternary(true, false, k3s_become_for_all) }}" + +- name: Flush Handlers + meta: flush_handlers diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/docker/redhat/install-prerequisites.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/redhat/install-prerequisites.yml new file mode 100644 index 000000000..a266d0aef --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/redhat/install-prerequisites.yml @@ -0,0 +1,55 @@ +--- + +- name: Ensure python-dnf is installed + ansible.builtin.package: + name: "{{ 'python-dnf' if ansible_python_version is version_compare('3.0.0', '<') else 'python3-dnf' }}" + state: present + register: ensure_python_dnf_installed + become: "{{ k3s_become_for_package_install | ternary(true, false, k3s_become_for_all) }}" + until: ensure_python_dnf_installed is succeeded + retries: 3 + delay: 10 + when: ansible_pkg_mgr == 'dnf' + +- name: Ensure Docker prerequisites are installed + ansible.builtin.yum: + name: + - yum-utils + - device-mapper-persistent-data + - lvm2 + state: present + register: ensure_docker_prerequisites_installed + until: ensure_docker_prerequisites_installed is succeeded + retries: 3 + delay: 10 + become: "{{ k3s_become_for_package_install | ternary(true, false, k3s_become_for_all) }}" + +- name: Check to see if Docker repository is available for this distribution + ansible.builtin.uri: + url: "https://download.docker.com/linux/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}" + register: k3s_redhat_repo_check + failed_when: false + changed_when: false + +- name: Ensure Docker repository is installed and configured + ansible.builtin.yum_repository: + name: docker-ce + description: Docker CE Repository + baseurl: https://download.docker.com/linux/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/$basearch/stable + gpgkey: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg + enabled: true + gpgcheck: true + state: present + when: + - 
ansible_distribution | lower not in ['amazon'] + - k3s_redhat_repo_check.status == 200 + become: "{{ k3s_become_for_package_install | ternary(true, false, k3s_become_for_all) }}" + +- name: Ensure Docker repository is installed and configured from file + ansible.builtin.command: yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo + args: + creates: /etc/yum.repos.d/docker-ce.repo + when: + - ansible_distribution | lower not in ['amazon'] + - k3s_redhat_repo_check.status != 200 + become: "{{ k3s_become_for_package_install | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/docker/suse/install-prerequisites.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/suse/install-prerequisites.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/suse/install-prerequisites.yml @@ -0,0 +1 @@ +--- diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/docker/suse/install.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/suse/install.yml new file mode 100644 index 000000000..91b923d86 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/docker/suse/install.yml @@ -0,0 +1,16 @@ +--- + +- name: Ensure docker is installed using Zypper + community.general.zypper: + name: docker + state: present + register: ensure_docker_prerequisites_installed + until: ensure_docker_prerequisites_installed is succeeded + retries: 3 + delay: 10 + notify: + - restart docker + become: "{{ k3s_become_for_package_install | ternary(true, false, k3s_become_for_all) }}" + +- name: Flush Handlers + meta: flush_handlers diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/download-k3s.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/download-k3s.yml new file mode 100644 index 000000000..076d7c53d --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/download-k3s.yml @@ -0,0 +1,51 @@ +--- + +- name: Ensure target host architecture information is set as a fact + ansible.builtin.set_fact: + k3s_arch: "{{ k3s_arch_lookup[ansible_architecture].arch }}" + k3s_arch_suffix: "{{ k3s_arch_lookup[ansible_architecture].suffix }}" + check_mode: false + +- name: Ensure URLs are set as facts for downloading binaries + ansible.builtin.set_fact: + k3s_binary_url: "{{ k3s_github_download_url }}/{{ k3s_release_version }}/k3s{{ k3s_arch_suffix }}" + k3s_hash_url: "{{ k3s_github_download_url }}/{{ k3s_release_version }}/sha256sum-{{ k3s_arch }}.txt" + check_mode: false + +- name: Override k3s_binary_url and k3s_hash_url facts for testing specific commit + ansible.builtin.set_fact: + k3s_binary_url: "https://storage.googleapis.com/k3s-ci-builds/k3s{{ k3s_arch_suffix }}-{{ k3s_release_version }}" + k3s_hash_url: "https://storage.googleapis.com/k3s-ci-builds/k3s{{ k3s_arch_suffix }}-{{ k3s_release_version }}.sha256sum" + when: + - k3s_release_version | regex_search("^[a-z0-9]{40}$") + check_mode: false + +- name: Ensure the k3s hashsum is downloaded + ansible.builtin.uri: + url: "{{ k3s_hash_url }}" + return_content: true + register: k3s_hash_sum_raw + check_mode: false + +- name: Ensure sha256sum is set from hashsum variable + ansible.builtin.set_fact: + k3s_hash_sum: "{{ (k3s_hash_sum_raw.content.split('\n') | + select('search', 'k3s' + k3s_arch_suffix) | + reject('search', 'images') | + first).split() | first }}" + changed_when: false + check_mode: false + +- name: Ensure installation directory exists + ansible.builtin.file: + 
path: "{{ k3s_install_dir }}" + state: directory + mode: 0755 + +- name: Ensure k3s binary is downloaded + ansible.builtin.get_url: + url: "{{ k3s_binary_url }}" + dest: "{{ k3s_install_dir }}/k3s-{{ k3s_release_version }}" + checksum: "sha256:{{ k3s_hash_sum }}" + mode: 0755 + become: "{{ k3s_become_for_install_dir | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/get-systemd-context.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/get-systemd-context.yml new file mode 100644 index 000000000..5d4c4359a --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/get-systemd-context.yml @@ -0,0 +1,10 @@ +--- + +- name: Ensure systemd context is correct if we are running k3s rootless + ansible.builtin.set_fact: + k3s_systemd_context: user + k3s_systemd_unit_dir: "{{ ansible_user_dir }}/.config/systemd/user" + when: + - k3s_runtime_config is defined + - k3s_runtime_config.rootless is defined + - k3s_runtime_config.rootless diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/get-version.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/get-version.yml new file mode 100644 index 000000000..9969c54b9 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/get-version.yml @@ -0,0 +1,32 @@ +--- + +- name: Ensure k3s_release_version is set to default if false + ansible.builtin.set_fact: + k3s_release_version: "{{ k3s_release_channel }}" + check_mode: false + when: + - k3s_release_version is defined + - not k3s_release_version + +- name: Ensure the default release channel is set + ansible.builtin.set_fact: + k3s_release_channel: "{{ k3s_release_version | default('stable') }}" + check_mode: false + +- name: Get the latest release version from k3s.io + ansible.builtin.uri: + url: "{{ k3s_api_releases }}" + return_content: true + body_format: json + register: k3s_latest_release + no_log: true + check_mode: false + +- name: Ensure the release version is set as a fact + ansible.builtin.set_fact: + k3s_release_version: "{{ item.latest }}" + loop: "{{ k3s_latest_release.json.data }}" + check_mode: false + when: + - item.name == k3s_release_channel + - item.type == "channel" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/install-k3s-directories.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/install-k3s-directories.yml new file mode 100644 index 000000000..d2ed75797 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/install-k3s-directories.yml @@ -0,0 +1,12 @@ +--- + +- name: Ensure {{ directory.name }} exists + ansible.builtin.file: + path: "{{ directory.path }}" + state: directory + mode: "{{ directory.mode | default(0755) }}" + become: "{{ k3s_become_for_directory_creation | ternary(true, false, k3s_become_for_all) }}" + when: + - directory.path is defined + - directory.path | length > 0 + - directory.path != omit diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/install-k3s-node.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/install-k3s-node.yml new file mode 100644 index 000000000..a303c2ecd --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/install-k3s-node.yml @@ -0,0 +1,58 @@ +--- + +- name: Ensure k3s is linked into the installation destination + ansible.builtin.file: + src: "{{ k3s_install_dir }}/k3s-{{ k3s_release_version }}" + dest: "{{ k3s_install_dir }}/{{ item }}" + state: "{{ 'hard' if k3s_install_hard_links else 'link' }}" + force: "{{ k3s_install_hard_links }}" + mode: 0755 + loop: + - k3s + - kubectl + - crictl + - ctr + 
when: not ansible_check_mode + notify: + - restart k3s + become: "{{ k3s_become_for_install_dir | ternary(true, false, k3s_become_for_all) }}" + +- name: Ensure k3s config file exists + ansible.builtin.template: + src: config.yaml.j2 + dest: "{{ k3s_config_file }}" + mode: 0644 + notify: + - reload systemd + - restart k3s + become: "{{ k3s_become_for_install_dir | ternary(true, false, k3s_become_for_all) }}" + +- name: Ensure k3s service unit file is present + ansible.builtin.template: + src: k3s.service.j2 + dest: "{{ k3s_systemd_unit_dir }}/k3s.service" + mode: 0644 + notify: + - reload systemd + - restart k3s + become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}" + +- name: Ensure k3s killall script is present + ansible.builtin.template: + src: k3s-killall.sh.j2 + dest: "/usr/local/bin/k3s-killall.sh" + mode: 0700 + become: "{{ k3s_become_for_usr_local_bin | ternary(true, false, k3s_become_for_all) }}" + when: + - k3s_runtime_config is defined + - ("rootless" not in k3s_runtime_config or not k3s_runtime_config.rootless) + +- name: Ensure k3s uninstall script is present + ansible.builtin.template: + src: k3s-uninstall.sh.j2 + dest: "/usr/local/bin/k3s-uninstall.sh" + mode: 0700 + become: "{{ k3s_become_for_usr_local_bin | ternary(true, false, k3s_become_for_all) }}" + when: + - k3s_runtime_config is defined + - ("rootless" not in k3s_runtime_config or not k3s_runtime_config.rootless) diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/install-k3s.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/install-k3s.yml new file mode 100644 index 000000000..f2fc34d13 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/install-k3s.yml @@ -0,0 +1,32 @@ +--- + +- include_tasks: install-k3s-directories.yml + loop: "{{ k3s_ensure_directories_exist }}" + loop_control: + loop_var: directory + +- include_tasks: install-k3s-node.yml + when: + - ((k3s_control_node and k3s_controller_list | length == 1) + or (k3s_primary_control_node and k3s_controller_list | length > 1)) + - not ansible_check_mode + +- name: Flush Handlers + meta: flush_handlers + +- include_tasks: install-k3s-node.yml + when: k3s_build_cluster + +- name: Ensure k3s initial control plane server is started + ansible.builtin.systemd: + name: k3s + state: started + enabled: "{{ k3s_start_on_boot }}" + scope: "{{ k3s_systemd_context }}" + register: k3s_systemd_start_k3s + failed_when: + - k3s_systemd_start_k3s is not succeeded + - not ansible_check_mode + when: (k3s_control_node and k3s_controller_list | length == 1) + or (k3s_primary_control_node and k3s_controller_list | length > 1) + become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/preconfigure-k3s-auto-deploying-manifests.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/preconfigure-k3s-auto-deploying-manifests.yml new file mode 100644 index 000000000..82b246d92 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/preconfigure-k3s-auto-deploying-manifests.yml @@ -0,0 +1,18 @@ +--- + +- name: Ensure that the manifests directory exists + ansible.builtin.file: + state: directory + path: "{{ k3s_server_manifests_dir }}" + mode: 0755 + when: k3s_server_manifests_templates | length > 0 + become: "{{ k3s_become_for_directory_creation | ternary(true, false, k3s_become_for_all) }}" + +# https://rancher.com/docs/k3s/latest/en/advanced/#auto-deploying-manifests +- name: Ensure auto-deploying manifests are copied to controllers + 
ansible.builtin.template: + src: "{{ item }}" + dest: "{{ k3s_server_manifests_dir }}/{{ item | basename | replace('.j2','') }}" + mode: 0644 + loop: "{{ k3s_server_manifests_templates }}" + become: "{{ k3s_become_for_directory_creation | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/build/preconfigure-k3s.yml b/server/ansible/roles/xanmanning.k3s/tasks/build/preconfigure-k3s.yml new file mode 100644 index 000000000..54b85688d --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/build/preconfigure-k3s.yml @@ -0,0 +1,125 @@ +--- + +- name: Ensure k3s_build_cluster is false if running against a single node. + ansible.builtin.set_fact: + k3s_build_cluster: false + when: + - ansible_play_hosts_all | length < 2 + - k3s_registration_address is not defined + +- name: Ensure k3s control node fact is set + ansible.builtin.set_fact: + k3s_control_node: "{{ 'false' if k3s_build_cluster else 'true' }}" + when: k3s_control_node is not defined + +- name: Ensure k3s primary control node fact is set + ansible.builtin.set_fact: + k3s_primary_control_node: "{{ 'false' if k3s_build_cluster else 'true' }}" + when: k3s_primary_control_node is not defined + +- name: Ensure k3s control plane port is captured + ansible.builtin.set_fact: + k3s_control_plane_port: "{{ k3s_runtime_config['https-listen-port'] | default(6443) }}" + delegate_to: k3s_primary_control_node + +- name: Ensure a count of control nodes is generated from ansible_play_hosts_all + ansible.builtin.set_fact: + k3s_controller_list: "{{ k3s_controller_list + [ item ] }}" + when: + - hostvars[item].k3s_control_node is defined + - hostvars[item].k3s_control_node + loop: "{{ ansible_play_hosts_all }}" + +- name: Ensure a k3s control node is defined if none are found in ansible_play_hosts_all + block: + + - name: Set the control host + ansible.builtin.set_fact: + k3s_control_node: true + when: inventory_hostname == ansible_play_hosts_all[0] + + - name: Ensure a count of control nodes is generated + ansible.builtin.set_fact: + k3s_controller_list: "{{ k3s_controller_list + [ item ] }}" + when: + - hostvars[item].k3s_control_node is defined + - hostvars[item].k3s_control_node + loop: "{{ ansible_play_hosts_all }}" + + when: + - k3s_controller_list | length < 1 + - k3s_build_cluster is defined + - k3s_build_cluster + +- name: Ensure a primary k3s control node is defined if multiple are found in ansible_play_hosts_all + ansible.builtin.set_fact: + k3s_primary_control_node: true + when: + - k3s_controller_list is defined + - k3s_controller_list | length > 1 + - inventory_hostname == k3s_controller_list[0] + - k3s_build_cluster is defined + - k3s_build_cluster + +- name: Ensure ansible_host is mapped to inventory_hostname + ansible.builtin.lineinfile: + path: /tmp/inventory.txt + line: >- + {{ item }} + @@@ + {{ hostvars[item].ansible_host | default(hostvars[item].ansible_fqdn) }} + @@@ + C_{{ hostvars[item].k3s_control_node }} + @@@ + P_{{ hostvars[item].k3s_primary_control_node | default(False) }} + create: true + regexp: "^{{ item }} @@@ {{ hostvars[item].ansible_host | default(hostvars[item].ansible_fqdn) }}" + mode: 0600 + loop: "{{ ansible_play_hosts_all }}" + check_mode: false + when: hostvars[item].k3s_control_node is defined + +- name: Delegate an initializing control plane node + block: + - name: Lookup control node from file + ansible.builtin.command: "grep '{{ 'P_True' if (k3s_controller_list | length > 1) else 'C_True' }}' /tmp/inventory.txt" + changed_when: false + check_mode: false 
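+ # /tmp/inventory.txt is written by the lineinfile task above; the grep selects + # the primary control node (P_True) when multiple controllers exist, + # otherwise the first control node (C_True).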
+ register: k3s_control_delegate_raw + + - name: Ensure control node is delegated for obtaining a cluster token + ansible.builtin.set_fact: + k3s_control_delegate: "{{ k3s_control_delegate_raw.stdout.split(' @@@ ')[0] }}" + check_mode: false + when: k3s_control_delegate is not defined + + - name: Ensure the node registration address is defined from k3s_control_node_address + ansible.builtin.set_fact: + k3s_registration_address: "{{ k3s_control_node_address }}" + check_mode: false + when: k3s_control_node_address is defined + + - name: Ensure the node registration address is defined + ansible.builtin.set_fact: + k3s_registration_address: "{{ hostvars[k3s_control_delegate].ansible_host | default(hostvars[k3s_control_delegate].ansible_fqdn) }}" + check_mode: false + when: + - k3s_registration_address is not defined + - k3s_control_node_address is not defined + + when: k3s_registration_address is not defined + or k3s_control_delegate is not defined + +- name: Ensure k3s_runtime_config is set for control plane + ansible.builtin.set_fact: + k3s_runtime_config: "{{ (k3s_server | default({})) | combine((k3s_agent | default({}))) }}" + when: + - (k3s_server is defined or k3s_agent is defined) + - (k3s_control_node is defined and k3s_control_node) + +- name: Ensure k3s_runtime_config is set for agents + ansible.builtin.set_fact: + k3s_runtime_config: "{{ (k3s_agent | default({})) }}" + when: + - k3s_agent is defined + - (k3s_control_node is not defined or not k3s_control_node) diff --git a/server/ansible/roles/xanmanning.k3s/tasks/main.yml b/server/ansible/roles/xanmanning.k3s/tasks/main.yml new file mode 100644 index 000000000..bcefc185e --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/main.yml @@ -0,0 +1,5 @@ +--- + +- import_tasks: validate/pre-flight.yml + +- include_tasks: state-{{ (k3s_state | lower) | default('installed') }}.yml diff --git a/server/ansible/roles/xanmanning.k3s/tasks/operate/start-k3s.yml b/server/ansible/roles/xanmanning.k3s/tasks/operate/start-k3s.yml new file mode 100644 index 000000000..f393d5b80 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/operate/start-k3s.yml @@ -0,0 +1,20 @@ +--- + +- name: Ensure k3s service is started + ansible.builtin.systemd: + name: k3s + state: started + enabled: "{{ k3s_start_on_boot }}" + when: k3s_non_root is not defined or not k3s_non_root + become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}" + +- name: Ensure k3s service is started (rootless) + ansible.builtin.systemd: + name: k3s + state: started + enabled: "{{ k3s_start_on_boot }}" + scope: user + when: + - k3s_non_root is defined + - k3s_non_root + become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/operate/stop-k3s.yml b/server/ansible/roles/xanmanning.k3s/tasks/operate/stop-k3s.yml new file mode 100644 index 000000000..d1c97d90a --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/operate/stop-k3s.yml @@ -0,0 +1,20 @@ +--- + +- name: Ensure k3s service is stopped + ansible.builtin.systemd: + name: k3s + state: stopped + enabled: "{{ k3s_start_on_boot }}" + when: k3s_non_root is not defined or not k3s_non_root + become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}" + +- name: Ensure k3s service is stopped (rootless) + ansible.builtin.systemd: + name: k3s + state: stopped + enabled: "{{ k3s_start_on_boot }}" + scope: user + when: + - k3s_non_root is defined + - k3s_non_root + become: "{{ k3s_become_for_systemd | ternary(true,
false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/state-downloaded.yml b/server/ansible/roles/xanmanning.k3s/tasks/state-downloaded.yml new file mode 100644 index 000000000..6ec330d6f --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/state-downloaded.yml @@ -0,0 +1,6 @@ +--- + +- import_tasks: build/get-version.yml + when: k3s_release_version is not defined or not k3s_release_version + +- import_tasks: build/download-k3s.yml diff --git a/server/ansible/roles/xanmanning.k3s/tasks/state-installed.yml b/server/ansible/roles/xanmanning.k3s/tasks/state-installed.yml new file mode 100644 index 000000000..a6939270f --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/state-installed.yml @@ -0,0 +1,53 @@ +--- + +- import_tasks: build/preconfigure-k3s.yml + +- import_tasks: teardown/drain-and-remove-nodes.yml + +- import_tasks: build/get-version.yml + when: k3s_release_version is not defined + or not k3s_release_version + or k3s_release_version is not regex('\\+k3s[1-9]$') + +- import_tasks: validate/main.yml + when: not k3s_skip_validation + +- import_tasks: build/get-systemd-context.yml + +- name: Ensure docker installation tasks are run + block: + + - include_tasks: build/docker/{{ ansible_os_family | lower }}/install-prerequisites.yml + + - import_tasks: build/docker/install.yml + when: ansible_distribution | replace(" ", "-") | lower not in ['amazon', 'suse', 'opensuse-leap', 'archlinux'] + + - include_tasks: build/docker/{{ ansible_distribution | replace(" ", "-") | lower }}/install.yml + when: ansible_distribution | replace(" ", "-") | lower in ['amazon', 'suse', 'opensuse-leap', 'archlinux'] + + when: + - ('docker' in k3s_runtime_config and k3s_runtime_config.docker) + - ('rootless' not in k3s_runtime_config or not k3s_runtime_config.rootless) + +- name: Flush Handlers + meta: flush_handlers + +- import_tasks: build/download-k3s.yml + +- import_tasks: build/preconfigure-k3s-auto-deploying-manifests.yml + when: + - k3s_control_node + - k3s_server_manifests_templates | length > 0 + +- import_tasks: build/install-k3s.yml + +- include_tasks: validate/configuration/cluster-init.yml + when: + - k3s_control_delegate is defined + - k3s_control_delegate == inventory_hostname + +- import_tasks: build/configure-k3s-cluster.yml + when: + - k3s_build_cluster is defined + - k3s_build_cluster + - k3s_registration_address is defined diff --git a/server/ansible/roles/xanmanning.k3s/tasks/state-restarted.yml b/server/ansible/roles/xanmanning.k3s/tasks/state-restarted.yml new file mode 100644 index 000000000..85186a875 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/state-restarted.yml @@ -0,0 +1,5 @@ +--- + +- import_tasks: operate/stop-k3s.yml + +- import_tasks: operate/start-k3s.yml diff --git a/server/ansible/roles/xanmanning.k3s/tasks/state-started.yml b/server/ansible/roles/xanmanning.k3s/tasks/state-started.yml new file mode 100644 index 000000000..5441988c7 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/state-started.yml @@ -0,0 +1,3 @@ +--- + +- import_tasks: operate/start-k3s.yml diff --git a/server/ansible/roles/xanmanning.k3s/tasks/state-stopped.yml b/server/ansible/roles/xanmanning.k3s/tasks/state-stopped.yml new file mode 100644 index 000000000..af1e1bd64 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/state-stopped.yml @@ -0,0 +1,3 @@ +--- + +- import_tasks: operate/stop-k3s.yml diff --git a/server/ansible/roles/xanmanning.k3s/tasks/state-uninstalled.yml 
b/server/ansible/roles/xanmanning.k3s/tasks/state-uninstalled.yml new file mode 100644 index 000000000..d96315f93 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/state-uninstalled.yml @@ -0,0 +1,25 @@ +--- + +- import_tasks: build/preconfigure-k3s.yml + +- import_tasks: teardown/drain-and-remove-nodes.yml + +- import_tasks: teardown/uninstall-k3s.yml + +- name: Ensure docker uninstall tasks are run + block: + + - import_tasks: teardown/docker/uninstall.yml + when: ansible_distribution | replace(" ", "-") | lower not in ['amazon', 'suse', 'opensuse-leap', 'archlinux'] + + - include_tasks: teardown/docker/{{ ansible_distribution | replace(" ", "-") | lower }}/uninstall.yml + when: ansible_distribution | replace(" ", "-") | lower in ['amazon', 'suse', 'opensuse-leap', 'archlinux'] + + - include_tasks: teardown/docker/{{ ansible_os_family | lower }}/uninstall-prerequisites.yml + + when: + - ('docker' in k3s_runtime_config and k3s_runtime_config.docker) + - ('rootless' not in k3s_runtime_config or not k3s_runtime_config.rootless) + +- import_tasks: validate/state/uninstalled.yml + when: not k3s_skip_validation diff --git a/server/ansible/roles/xanmanning.k3s/tasks/state-validated.yml b/server/ansible/roles/xanmanning.k3s/tasks/state-validated.yml new file mode 100644 index 000000000..a94e0b364 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/state-validated.yml @@ -0,0 +1,7 @@ +--- + +- import_tasks: validate/pre-flight.yml + +- import_tasks: validate/main.yml + +- import_tasks: validate/post-install.yml diff --git a/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/amazon/uninstall.yml b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/amazon/uninstall.yml new file mode 100644 index 000000000..5f353f493 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/amazon/uninstall.yml @@ -0,0 +1,7 @@ +--- + +- name: Ensure docker is uninstalled using amazon-linux-extras + ansible.builtin.command: amazon-linux-extras uninstall docker + register: uninstall_docker_from_amazon_linux + changed_when: uninstall_docker_from_amazon_linux.rc == 0 + become: "{{ k3s_become_for_uninstall | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/archlinux/uninstall.yml b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/archlinux/uninstall.yml new file mode 100644 index 000000000..507a42762 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/archlinux/uninstall.yml @@ -0,0 +1,11 @@ +--- + +- name: Ensure docker is uninstalled using Pacman + community.general.pacman: + name: docker + state: absent + register: ensure_docker_uninstalled + until: ensure_docker_uninstalled is succeeded + retries: 3 + delay: 10 + become: "{{ k3s_become_for_uninstall | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/debian/uninstall-prerequisites.yml b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/debian/uninstall-prerequisites.yml new file mode 100644 index 000000000..5b87049e2 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/debian/uninstall-prerequisites.yml @@ -0,0 +1,15 @@ +--- + +- name: Ensure Docker repository is uninstalled + ansible.builtin.apt_repository: + filename: docker-ce + repo: "deb https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable" + update_cache: false + state: absent + become: "{{ 
k3s_become_for_uninstall | ternary(true, false, k3s_become_for_all) }}" + +- name: Ensure Docker APT key is uninstalled + ansible.builtin.apt_key: + url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg + state: absent + become: "{{ k3s_become_for_uninstall | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/opensuse-leap/uninstall.yml b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/opensuse-leap/uninstall.yml new file mode 100644 index 000000000..d02535935 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/opensuse-leap/uninstall.yml @@ -0,0 +1,11 @@ +--- + +- name: Ensure docker is uninstalled using Zypper + community.general.zypper: + name: docker + state: absent + register: ensure_docker_uninstalled + until: ensure_docker_uninstalled is succeeded + retries: 3 + delay: 10 + become: "{{ k3s_become_for_uninstall | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/redhat/uninstall-prerequisites.yml b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/redhat/uninstall-prerequisites.yml new file mode 100644 index 000000000..3961c60c7 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/redhat/uninstall-prerequisites.yml @@ -0,0 +1,13 @@ +--- + +- name: Ensure Docker repository is removed + ansible.builtin.yum_repository: + name: docker-ce + description: Docker CE Repository + baseurl: https://download.docker.com/linux/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/$basearch/stable + gpgkey: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg + enabled: false + gpgcheck: true + state: absent + when: ansible_distribution | lower not in ['amazon'] + become: "{{ k3s_become_for_uninstall | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/suse/uninstall-prerequisites.yml b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/suse/uninstall-prerequisites.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/suse/uninstall-prerequisites.yml @@ -0,0 +1 @@ +--- diff --git a/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/suse/uninstall.yml b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/suse/uninstall.yml new file mode 100644 index 000000000..e96755947 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/suse/uninstall.yml @@ -0,0 +1,11 @@ +--- + +- name: Ensure docker is uninstalled using Zypper + community.general.zypper: + name: docker + state: absent + register: ensure_docker_uninstalled + until: ensure_docker_uninstalled is succeeded + retries: 3 + delay: 10 + become: "{{ k3s_become_for_uninstall | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/uninstall.yml b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/uninstall.yml new file mode 100644 index 000000000..736cd3852 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/teardown/docker/uninstall.yml @@ -0,0 +1,14 @@ +--- + +- name: Ensure docker is uninstalled + ansible.builtin.package: + name: + - docker-ce + - docker-ce-cli + - containerd.io + state: absent + register: ensure_docker_uninstalled + until: ensure_docker_uninstalled is succeeded + retries: 3 + delay: 10 + become: "{{
k3s_become_for_uninstall | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/teardown/drain-and-remove-nodes.yml b/server/ansible/roles/xanmanning.k3s/tasks/teardown/drain-and-remove-nodes.yml new file mode 100644 index 000000000..b99a843cf --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/teardown/drain-and-remove-nodes.yml @@ -0,0 +1,51 @@ +--- + +- name: Check if kubectl exists + ansible.builtin.stat: + path: "{{ k3s_install_dir }}/kubectl" + register: k3s_check_kubectl + become: "{{ k3s_become_for_kubectl | ternary(true, false, k3s_become_for_all) }}" + +- name: Clean up nodes that are in an uninstalled state + block: + + - name: Gather a list of nodes + ansible.builtin.command: "{{ k3s_install_dir }}/kubectl get nodes" + changed_when: false + failed_when: false + delegate_to: "{{ k3s_control_delegate }}" + run_once: true + register: kubectl_get_nodes_result + become: "{{ k3s_become_for_kubectl | ternary(true, false, k3s_become_for_all) }}" + + - name: Ensure uninstalled nodes are drained + ansible.builtin.command: >- + {{ k3s_install_dir }}/kubectl drain {{ item }} + --ignore-daemonsets + --delete-local-data + --force + delegate_to: "{{ k3s_control_delegate }}" + run_once: true + when: + - item in kubectl_get_nodes_result.stdout + - hostvars[item].k3s_state is defined + - hostvars[item].k3s_state == 'uninstalled' + loop: "{{ ansible_play_hosts_all }}" + become: "{{ k3s_become_for_kubectl | ternary(true, false, k3s_become_for_all) }}" + + - name: Ensure uninstalled nodes are removed + ansible.builtin.command: "{{ k3s_install_dir }}/kubectl delete node {{ item }}" + delegate_to: "{{ k3s_control_delegate }}" + run_once: true + when: + - item in kubectl_get_nodes_result.stdout + - hostvars[item].k3s_state is defined + - hostvars[item].k3s_state == 'uninstalled' + loop: "{{ ansible_play_hosts_all }}" + become: "{{ k3s_become_for_kubectl | ternary(true, false, k3s_become_for_all) }}" + + when: + - k3s_check_kubectl.stat.exists is defined + - k3s_check_kubectl.stat.exists + - k3s_control_delegate is defined + - not ansible_check_mode diff --git a/server/ansible/roles/xanmanning.k3s/tasks/teardown/uninstall-k3s.yml b/server/ansible/roles/xanmanning.k3s/tasks/teardown/uninstall-k3s.yml new file mode 100644 index 000000000..b4bdeb207 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/teardown/uninstall-k3s.yml @@ -0,0 +1,52 @@ +--- + +- name: Check to see if k3s-killall.sh exists + ansible.builtin.stat: + path: /usr/local/bin/k3s-killall.sh + register: check_k3s_killall_script + +- name: Check to see if k3s-uninstall.sh exists + ansible.builtin.stat: + path: /usr/local/bin/k3s-uninstall.sh + register: check_k3s_uninstall_script + +- name: Check to see if docker is present + ansible.builtin.command: which docker + failed_when: false + changed_when: false + register: check_k3s_docker_path + +- name: Run k3s-killall.sh + ansible.builtin.command: /usr/local/bin/k3s-killall.sh + register: k3s_killall + changed_when: k3s_killall.rc == 0 + when: check_k3s_killall_script.stat.exists + become: "{{ k3s_become_for_uninstall | ternary(true, false, k3s_become_for_all) }}" + +- name: Run k3s-uninstall.sh + ansible.builtin.command: /usr/local/bin/k3s-uninstall.sh + args: + removes: /usr/local/bin/k3s-uninstall.sh + register: k3s_uninstall + changed_when: k3s_uninstall.rc == 0 + when: check_k3s_uninstall_script.stat.exists + become: "{{ k3s_become_for_uninstall | ternary(true, false, k3s_become_for_all) }}" + +- name: Ensure hard links are
removed + ansible.builtin.file: + path: "{{ k3s_install_dir }}/{{ item }}" + state: absent + loop: + - kubectl + - crictl + - ctr + when: + - k3s_install_hard_links + - not ansible_check_mode + become: "{{ k3s_become_for_uninstall | ternary(true, false, k3s_become_for_all) }}" + +- name: Clean up Docker + ansible.builtin.command: docker system prune -a --force + when: + - ("docker" in k3s_runtime_config and k3s_runtime_config.docker) + - check_k3s_docker_path.rc == 0 diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/cluster-init.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/cluster-init.yml new file mode 100644 index 000000000..8e8bc9765 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/cluster-init.yml @@ -0,0 +1,19 @@ +--- + +- name: Check that the initial control plane server is available to accept connections + ansible.builtin.wait_for: + port: "{{ k3s_runtime_config['https-listen-port'] | default('6443') }}" + host: "{{ k3s_runtime_config['bind-address'] | default('127.0.0.1') }}" + delay: 5 + sleep: 5 + timeout: 300 + +- name: Check that cluster-token exists + ansible.builtin.stat: + path: "{{ k3s_runtime_config['data-dir'] | default(k3s_data_dir) }}/server/token" + register: k3s_check_cluster_token + check_mode: false + failed_when: + - not k3s_check_cluster_token.stat.exists + - not ansible_check_mode + become: "{{ k3s_become_for_kubectl | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/control-node-count.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/control-node-count.yml new file mode 100644 index 000000000..72f8581de --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/control-node-count.yml @@ -0,0 +1,45 @@ +--- + +- name: Check the conditions when a single controller is defined + ansible.builtin.assert: + that: + - (k3s_controller_list | length == 1) + - ("datastore-endpoint" not in k3s_runtime_config or not k3s_runtime_config['datastore-endpoint']) + - (k3s_etcd_datastore is not defined or not k3s_etcd_datastore) + success_msg: "Control plane configuration is valid." + fail_msg: >- + Control plane configuration is invalid. + Please see notes about k3s_control_node and HA in README.md. + when: + - k3s_controller_list | length == 1 + - not k3s_use_unsupported_config + - k3s_control_node + +- name: Check the conditions when multiple controllers are defined + ansible.builtin.assert: + that: + - (k3s_controller_list | length >= 2) + - (("datastore-endpoint" in k3s_runtime_config and k3s_runtime_config['datastore-endpoint']) + or (k3s_etcd_datastore is defined and k3s_etcd_datastore)) + success_msg: "Control plane configuration is valid." + fail_msg: >- + Control plane configuration is invalid. Please see notes about + k3s_control_node and HA in README.md. + when: + - k3s_controller_list | length >= 2 + - k3s_control_node + +- name: Check the conditions when embedded etcd is defined + ansible.builtin.assert: + that: + - (k3s_controller_list | length >= 3) + - (((k3s_controller_list | length) % 2) == 1) + success_msg: "Control plane configuration is valid." + fail_msg: >- + Etcd should have a minimum of 3 defined members and the number of + members should be odd. 
Please see notes about HA in README.md + when: + - k3s_etcd_datastore is defined + - k3s_etcd_datastore + - not k3s_use_unsupported_config + - k3s_control_node diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/experimental-variables.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/experimental-variables.yml new file mode 100644 index 000000000..15f731c8d --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/experimental-variables.yml @@ -0,0 +1,31 @@ +--- + +- name: Check if embedded etcd datastore is enabled and marked as experimental + ansible.builtin.assert: + that: + - k3s_use_experimental is defined and k3s_use_experimental + success_msg: "Experimental variables are defined and enabled." + fail_msg: >- + Experimental variable k3s_etcd_datastore has been configured. + If you want to use this, ensure you set k3s_use_experimental. + when: + - k3s_etcd_datastore is defined + - k3s_etcd_datastore + - (k3s_release_version | replace('v', '')) is version_compare("1.19.5", '<') + +- name: Check if any experimental variables are configured and if they are enabled with k3s_use_experimental + ansible.builtin.assert: + that: + - k3s_use_experimental is defined and k3s_use_experimental + success_msg: "Experimental variables are defined and enabled." + fail_msg: >- + Experimental variable {{ item.setting }} has been configured. + If you want to use this, ensure you set k3s_use_experimental. + {% if item.documentation is defined %} + Documentation: {{ item.documentation }} + {% endif %} + loop: "{{ k3s_experimental_config }}" + when: + - (item.setting in k3s_runtime_config and k3s_runtime_config[item.setting]) + - ((item.until is not defined) or + (item.until is defined and (k3s_release_version | replace('v', '')) is version_compare(item.until, '<'))) diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/unsupported-rootless.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/unsupported-rootless.yml new file mode 100644 index 000000000..d947243f8 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/unsupported-rootless.yml @@ -0,0 +1,62 @@ +--- + +- name: Check if newuidmap is available + ansible.builtin.command: which newuidmap + failed_when: false + changed_when: false + register: k3s_check_newuidmap_installed + +- name: Check if /proc/sys/kernel/unprivileged_userns_clone exists + ansible.builtin.stat: + path: /proc/sys/kernel/unprivileged_userns_clone + register: k3s_check_unprivileged_userns_exists + +- name: Get the value of /proc/sys/kernel/unprivileged_userns_clone + ansible.builtin.slurp: + src: /proc/sys/kernel/unprivileged_userns_clone + register: k3s_get_unprivileged_userns_clone + when: k3s_check_unprivileged_userns_exists.stat.exists + +- name: Set the value of k3s_get_unprivileged_userns_clone + ansible.builtin.set_fact: + k3s_get_unprivileged_userns_clone: + content: "MQo=" + when: not k3s_check_unprivileged_userns_exists.stat.exists + +- name: Get the value of /proc/sys/user/max_user_namespaces + ansible.builtin.slurp: + src: /proc/sys/user/max_user_namespaces + register: k3s_get_max_user_namespaces + +- name: Get the contents of /etc/subuid + ansible.builtin.slurp: + src: /etc/subuid + register: k3s_get_subuid + +- name: Get the contents of /etc/subgid + ansible.builtin.slurp: + src: /etc/subgid + register: k3s_get_subgid + +- name: Get current user subuid and subgid values + ansible.builtin.set_fact: + k3s_current_user_subuid: "{{
(k3s_get_subuid['content'] | b64decode).split('\n') + | select('search', ansible_user_id) | first | default('UserNotFound:0:0') }}" + k3s_current_user_subgid: "{{ (k3s_get_subgid['content'] | b64decode).split('\n') + | select('search', ansible_user_id) | first | default('UserNotFound:0:0') }}" + +- name: Check user namespaces kernel parameters are adequate + ansible.builtin.assert: + that: + - k3s_get_unprivileged_userns_clone['content'] | b64decode | int == 1 + - k3s_get_max_user_namespaces['content'] | b64decode | int >= 28633 + - k3s_current_user_subuid != "UserNotFound:0:0" + - k3s_current_user_subgid != "UserNotFound:0:0" + - k3s_current_user_subuid.split(':')[2] | int >= 65536 + - k3s_current_user_subgid.split(':')[2] | int >= 65536 + - ansible_env['XDG_RUNTIME_DIR'] is defined + - k3s_check_newuidmap_installed.rc == 0 + success_msg: All kernel parameters passed + fail_msg: >- + Kernel parameters are not set correctly, please check + https://github.com/rootless-containers/rootlesskit diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/variables.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/variables.yml new file mode 100644 index 000000000..38a9c7bb0 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/configuration/variables.yml @@ -0,0 +1,55 @@ +--- + +- name: "Check that k3s_release_version >= {{ k3s_min_version }}" + ansible.builtin.assert: + that: + - (k3s_release_version | replace('v', '')) is version_compare(k3s_min_version, '>=') + success_msg: "{{ k3s_release_version }} is supported by this role." + fail_msg: "{{ k3s_release_version }} is not supported by this role, please use xanmanning.k3s v1.x." + +- name: Check configuration in k3s_server and k3s_agent that needs alternate configuration + ansible.builtin.assert: + that: + - (item.setting not in k3s_runtime_config) + success_msg: "{{ item.setting }} not found in server/agent config" + fail_msg: >- + {{ item.setting }} found in server/agent config. + Please set {{ item.correction }} to use this option. + {% if item.documentation is defined %} + Documentation: {{ item.documentation }} + {% endif %} + loop: "{{ k3s_config_exclude }}" + +- name: Check configuration in k3s_server and k3s_agent for deprecated configuration + ansible.builtin.assert: + that: + - (item.setting not in k3s_runtime_config) + or (not k3s_runtime_config[item.setting]) + success_msg: "{{ item.setting }} not found in server/agent config" + fail_msg: >- + {{ item.setting }} found in server/agent config. + Please set {{ item.correction }} to use this option. + {% if item.documentation is defined %} + Documentation: {{ item.documentation }} + {% endif %} + loop: "{{ k3s_deprecated_config }}" + when: + - (item.when is not defined + or (item.when is defined and (k3s_release_version | replace('v', '')) is version_compare(item.when, '>='))) + - not k3s_use_unsupported_config + +- name: Check configuration in k3s_server and k3s_agent against release version + ansible.builtin.assert: + that: + - (k3s_release_version | replace('v', '')) is version_compare(item.version, '>=') + success_msg: "{{ item.setting }} is supported by {{ k3s_release_version }}" + fail_msg: >- + {{ item.setting }} is not supported in {{ k3s_release_version }}. + Please update to v{{ item.version }} to use this option.
+ {% if item.documentation is defined %} + Documentation: {{ item.documentation }} + {% endif %} + loop: "{{ k3s_config_version_check }}" + when: + - k3s_config_version_check is defined + - item.setting in k3s_runtime_config diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/environment/local/issue-data.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/environment/local/issue-data.yml new file mode 100644 index 000000000..293533453 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/environment/local/issue-data.yml @@ -0,0 +1,82 @@ +--- + +- name: Ensure facts are gathered + ansible.builtin.setup: + +- name: Ensure Ansible version is captured + ansible.builtin.command: ansible --version + failed_when: false + changed_when: false + register: check_ansible_version + delegate_to: localhost + run_once: true + become: false + +- name: Ensure Ansible config is captured + ansible.builtin.command: ansible-config dump --only-changed + failed_when: false + changed_when: false + register: check_ansible_config + delegate_to: localhost + run_once: true + become: false + +- name: Ensure a list of roles is captured + ansible.builtin.command: ansible-galaxy role list + failed_when: false + changed_when: false + register: check_ansible_roles + delegate_to: localhost + run_once: true + become: false + +- name: Ensure facts are written to disk + ansible.builtin.copy: + dest: "{{ playbook_dir }}/pyratlabs-issue-dump.txt" + content: | + # Begin ANSIBLE VERSION + {{ check_ansible_version.stdout }} + # End ANSIBLE VERSION + + # Begin ANSIBLE CONFIG + {{ check_ansible_config.stdout }} + # End ANSIBLE CONFIG + + # Begin ANSIBLE ROLES + {{ check_ansible_roles.stdout }} + # End ANSIBLE ROLES + + # Begin PLAY HOSTS + {{ ansible_play_hosts_all | to_json }} + # End PLAY HOSTS + + # Begin K3S ROLE CONFIG + {% for host in ansible_play_hosts_all %} + ## {{ host }} + {% for config_key in hostvars[host] %} + {% if config_key | regex_search('^k3s_') %} + {{ config_key }}: {{ hostvars[host][config_key] | to_json }} + {% endif %} + {% endfor %} + + {% endfor %} + # End K3S ROLE CONFIG + + # Begin K3S RUNTIME CONFIG + {% for host in ansible_play_hosts_all %} + ## {{ host }} + {% if hostvars[host].k3s_runtime_config is defined %} + {{ hostvars[host].k3s_runtime_config }} + {% endif %} + {% endfor %} + # End K3S RUNTIME CONFIG + mode: 0600 + delegate_to: localhost + run_once: true + become: false + +- name: Fail the play + ansible.builtin.fail: + msg: "Please include the output of {{ playbook_dir }}/pyratlabs-issue-dump.txt in your bug report." + delegate_to: localhost + run_once: true diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/environment/local/packages.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/environment/local/packages.yml new file mode 100644 index 000000000..d0d99b143 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/environment/local/packages.yml @@ -0,0 +1,13 @@ +--- + +- name: Check that Ansible v{{ ansible_version.string }} is supported by this role + ansible.builtin.assert: + that: + - ansible_version.string is version_compare(k3s_ansible_min_version, '>=') + fail_msg: >- + Ansible v{{ ansible_version.string }} is not supported by this role. + Please install >= v{{ k3s_ansible_min_version }}. + success_msg: "Ansible v{{ ansible_version.string }} is supported."
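+ # Runs once on the Ansible controller (delegate_to: localhost below), not on the managed hosts.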
+ become: false + delegate_to: localhost + run_once: true diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/environment/remote/packages.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/environment/remote/packages.yml new file mode 100644 index 000000000..2fb9b1256 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/environment/remote/packages.yml @@ -0,0 +1,23 @@ +--- + +- name: Check that {{ package.name }} is installed + ansible.builtin.command: "which {{ package.name }}" + changed_when: false + failed_when: false + register: check_k3s_required_package + +- name: Test that checks for {{ package.name }} passed + ansible.builtin.assert: + that: + - check_k3s_required_package.rc == 0 + success_msg: "Found required package: {{ package.name }}." + fail_msg: >- + Could not find package: {{ package.name }}. + {% if package.documentation is defined %} + Documentation: {{ package.documentation }} + {% endif %} + when: + - (package.until is not defined + or k3s_release_version is version_compare(package.until, '>=')) + - (package.from is not defined + or k3s_release_version is version_compare(package.from, '>=')) diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/main.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/main.yml new file mode 100644 index 000000000..6a40f5d78 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/main.yml @@ -0,0 +1,15 @@ +--- + +- import_tasks: configuration/variables.yml + +- import_tasks: configuration/experimental-variables.yml + +- import_tasks: configuration/unsupported-rootless.yml + when: + - k3s_runtime_config.rootless is defined + - k3s_runtime_config.rootless + +- import_tasks: configuration/control-node-count.yml + when: + - k3s_build_cluster is defined + - k3s_build_cluster diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/post-install.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/post-install.yml new file mode 100644 index 000000000..d1af60813 --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/post-install.yml @@ -0,0 +1,5 @@ +--- + +- import_tasks: state/control-plane.yml + +- import_tasks: state/nodes.yml diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/pre-flight.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/pre-flight.yml new file mode 100644 index 000000000..2387cebcf --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/pre-flight.yml @@ -0,0 +1,27 @@ +--- + +- name: Check that k3s_state is a supported value + ansible.builtin.assert: + that: + - k3s_state in k3s_valid_states + fail_msg: "k3s_state not valid. Check README.md for details." + success_msg: "k3s_state is valid." 
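+ # When k3s_state is undefined, tasks/main.yml defaults the include to state-installed.yml.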
+ when: k3s_state is defined + +- import_tasks: environment/local/packages.yml + when: + - not k3s_skip_validation + - not k3s_skip_env_checks + +- include_tasks: environment/remote/packages.yml + loop: "{{ k3s_check_packages }}" + loop_control: + loop_var: package + when: + - not k3s_skip_validation + - not k3s_skip_env_checks + +- include_tasks: environment/local/issue-data.yml + when: + - pyratlabs_issue_controller_dump is defined + - pyratlabs_issue_controller_dump diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/state/control-plane.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/state/control-plane.yml new file mode 100644 index 000000000..a1a242a2b --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/state/control-plane.yml @@ -0,0 +1,10 @@ +--- + +- name: Check that the control plane is available to accept connections + ansible.builtin.wait_for: + port: "{{ k3s_runtime_config['https-listen-port'] | default('6443') }}" + host: "{{ k3s_runtime_config['bind-address'] | default('127.0.0.1') }}" + delay: 5 + sleep: 5 + timeout: 300 + when: k3s_control_node diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/state/nodes.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/state/nodes.yml new file mode 100644 index 000000000..6351d8eba --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/state/nodes.yml @@ -0,0 +1,19 @@ +--- + +- name: Check that all nodes are ready + ansible.builtin.command: "{{ k3s_install_dir }}/kubectl get nodes" + changed_when: false + failed_when: kubectl_get_nodes_result.stdout.find("was refused") != -1 or + kubectl_get_nodes_result.stdout.find("ServiceUnavailable") != -1 + register: kubectl_get_nodes_result + until: + - kubectl_get_nodes_result.rc == 0 + - kubectl_get_nodes_result.stdout.find("NotReady") == -1 + retries: 30 + delay: 20 + when: + - k3s_control_node + - ("flannel-backend" not in k3s_runtime_config + or k3s_runtime_config["flannel-backend"] != "none") + - not ansible_check_mode + become: "{{ k3s_become_for_kubectl | ternary(true, false, k3s_become_for_all) }}" diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/state/uninstalled.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/state/uninstalled.yml new file mode 100644 index 000000000..c666281fd --- /dev/null +++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/state/uninstalled.yml @@ -0,0 +1,51 @@ +--- + +- name: Check that k3s is not running + ansible.builtin.command: pgrep k3s + failed_when: + - check_k3s_process.rc == 0 + - not ansible_check_mode + changed_when: false + register: check_k3s_process + +- name: Check that docker is not running + ansible.builtin.command: pgrep docker + failed_when: + - check_k3s_docker_process.rc == 0 + - not ansible_check_mode + changed_when: false + register: check_k3s_docker_process + when: + - k3s_runtime_config.docker is defined + - k3s_runtime_config.docker + +- name: Fail if k3s binaries have not been removed + ansible.builtin.stat: + path: "{{ k3s_install_dir }}/{{ item }}" + register: check_k3s_binaries_removed + failed_when: check_k3s_binaries_removed.stat.exists + loop: + - k3s + - kubectl + - crictl + - ctr + +- name: Check k3s-killall.sh is removed + ansible.builtin.stat: + path: /usr/local/bin/k3s-killall.sh + register: check_k3s_killall + +- name: Fail if k3s-killall.sh script still exists + ansible.builtin.fail: + msg: k3s-killall.sh still exists, uninstall script failed. Please investigate.
diff --git a/server/ansible/roles/xanmanning.k3s/tasks/validate/state/uninstalled.yml b/server/ansible/roles/xanmanning.k3s/tasks/validate/state/uninstalled.yml
new file mode 100644
index 000000000..c666281fd
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/tasks/validate/state/uninstalled.yml
@@ -0,0 +1,51 @@
+---
+
+- name: Check that k3s is not running
+  ansible.builtin.command: pgrep k3s
+  failed_when:
+    - check_k3s_process.rc == 0
+    - not ansible_check_mode
+  changed_when: false
+  register: check_k3s_process
+
+- name: Check that docker is not running
+  ansible.builtin.command: pgrep docker
+  failed_when:
+    - check_k3s_docker_process.rc == 0
+    - not ansible_check_mode
+  changed_when: false
+  register: check_k3s_docker_process
+  when:
+    - k3s_runtime_config.docker is defined
+    - k3s_runtime_config.docker
+
+- name: Fail if k3s binaries have not been removed
+  ansible.builtin.stat:
+    path: "{{ k3s_install_dir }}/{{ item }}"
+  register: check_k3s_binaries_removed
+  failed_when: check_k3s_binaries_removed.stat.exists
+  loop:
+    - k3s
+    - kubectl
+    - crictl
+    - ctr
+
+- name: Check k3s-killall.sh is removed
+  ansible.builtin.stat:
+    path: /usr/local/bin/k3s-killall.sh
+  register: check_k3s_killall
+
+- name: Fail if k3s-killall.sh script still exists
+  ansible.builtin.fail:
+    msg: k3s-killall.sh still exists, uninstall script failed. Please investigate.
+  when: check_k3s_killall.stat.exists
+
+- name: Check k3s-uninstall.sh is removed
+  ansible.builtin.stat:
+    path: /usr/local/bin/k3s-uninstall.sh
+  register: check_k3s_uninstall
+
+- name: Fail if k3s-uninstall.sh script still exists
+  ansible.builtin.fail:
+    msg: k3s-uninstall.sh still exists, uninstall script failed. Please investigate.
+  when: check_k3s_uninstall.stat.exists
diff --git a/server/ansible/roles/xanmanning.k3s/templates/cluster-token.j2 b/server/ansible/roles/xanmanning.k3s/templates/cluster-token.j2
new file mode 100644
index 000000000..4037c1ec9
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/templates/cluster-token.j2
@@ -0,0 +1 @@
+{{ k3s_control_token }}
diff --git a/server/ansible/roles/xanmanning.k3s/templates/config.yaml.j2 b/server/ansible/roles/xanmanning.k3s/templates/config.yaml.j2
new file mode 100644
index 000000000..3a1749924
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/templates/config.yaml.j2
@@ -0,0 +1,8 @@
+---
+
+{% if k3s_etcd_datastore and (k3s_control_node is defined and k3s_control_node) and (k3s_primary_control_node is defined and k3s_primary_control_node) %}
+cluster-init: true
+{% endif %}
+{% if k3s_runtime_config is defined and k3s_runtime_config | length > 0 %}
+{{ k3s_runtime_config | to_nice_yaml(indent=2) }}
+{% endif %}
diff --git a/server/ansible/roles/xanmanning.k3s/templates/k3s-killall.sh.j2 b/server/ansible/roles/xanmanning.k3s/templates/k3s-killall.sh.j2
new file mode 100644
index 000000000..3fd674bd4
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/templates/k3s-killall.sh.j2
@@ -0,0 +1,69 @@
+#!/bin/sh
+
+[ $(id -u) -eq 0 ] || exec sudo $0 $@
+
+for bin in {{ k3s_runtime_config['data-dir'] | default(k3s_data_dir) }}/data/**/bin/; do
+    [ -d $bin ] && export PATH=$PATH:$bin:$bin/aux
+done
+
+set -x
+
+for service in {{ k3s_systemd_unit_dir }}/k3s*.service; do
+    [ -s $service ] && systemctl stop $(basename $service)
+done
+
+for service in /etc/init.d/k3s*; do
+    [ -x $service ] && $service stop
+done
+
+pschildren() {
+    ps -e -o ppid= -o pid= | \
+    sed -e 's/^\s*//g; s/\s\s*/\t/g;' | \
+    grep -w "^$1" | \
+    cut -f2
+}
+
+pstree() {
+    for pid in $@; do
+        echo $pid
+        for child in $(pschildren $pid); do
+            pstree $child
+        done
+    done
+}
+
+killtree() {
+    kill -9 $(
+        { set +x; } 2>/dev/null;
+        pstree $@;
+        set -x;
+    ) 2>/dev/null
+}
+
+getshims() {
+    ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1
+}
+
+killtree $({ set +x; } 2>/dev/null; getshims; set -x)
+
+do_unmount_and_remove() {
+    awk -v path="$1" '$2 ~ ("^" path) { print $2 }' /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount "$0" && rm -rf "$0"'
+}
+
+do_unmount_and_remove '/run/k3s'
+do_unmount_and_remove '{{ k3s_runtime_config['data-dir'] | default(k3s_data_dir) }}'
+do_unmount_and_remove '/var/lib/kubelet/pods'
+do_unmount_and_remove '/run/netns/cni-'
+
+# Remove CNI namespaces
+ip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns delete
+
+# Delete network interface(s) that match 'master cni0'
+ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do
+    iface=${iface%%@*}
+    [ -z "$iface" ] || ip link delete $iface
+done
+ip link delete cni0
+ip link delete flannel.1
+rm -rf /var/lib/cni/
+iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore
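Since config.yaml.j2 above simply re-serialises k3s_runtime_config, a rendering example may help. The inventory values below are made up; the output ordering follows to_nice_yaml, which sorts keys.

    # Given inventory vars such as:
    k3s_server:
      disable:
        - traefik
      flannel-backend: none

    # the generated config.yaml would read approximately:
    # ---
    # disable:
    #   - traefik
    # flannel-backend: none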
diff --git a/server/ansible/roles/xanmanning.k3s/templates/k3s-uninstall.sh.j2 b/server/ansible/roles/xanmanning.k3s/templates/k3s-uninstall.sh.j2
new file mode 100644
index 000000000..9aa5678b5
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/templates/k3s-uninstall.sh.j2
@@ -0,0 +1,50 @@
+#!/bin/sh
+set -x
+[ $(id -u) -eq 0 ] || exec sudo $0 $@
+
+/usr/local/bin/k3s-killall.sh
+
+if which systemctl; then
+    systemctl disable k3s
+    systemctl reset-failed k3s
+    systemctl daemon-reload
+fi
+if which rc-update; then
+    rc-update delete k3s default
+fi
+
+rm -f {{ k3s_systemd_unit_dir }}/k3s.service
+
+remove_uninstall() {
+    rm -f /usr/local/bin/k3s-uninstall.sh
+}
+trap remove_uninstall EXIT
+
+if (ls {{ k3s_systemd_unit_dir }}/k3s*.service || ls /etc/init.d/k3s*) >/dev/null 2>&1; then
+    set +x; echo 'Additional k3s services installed, skipping uninstall of k3s'; set -x
+    exit
+fi
+
+for cmd in kubectl crictl ctr; do
+    if [ -L {{ k3s_install_dir }}/$cmd ]; then
+        rm -f {{ k3s_install_dir }}/$cmd
+    fi
+done
+
+for bin in {{ k3s_install_dir }}/k3s*; do
+    if [ -f "${bin}" ]; then
+        rm -f "${bin}"
+    fi
+done
+
+rm -rf {{ k3s_config_dir }}
+rm -rf /run/k3s
+rm -rf /run/flannel
+rm -rf {{ k3s_runtime_config['data-dir'] | default(k3s_data_dir) }}
+rm -rf /var/lib/kubelet
+rm -f /usr/local/bin/k3s-killall.sh
+
+if type yum >/dev/null 2>&1; then
+    yum remove -y k3s-selinux
+    rm -f /etc/yum.repos.d/rancher-k3s-common*.repo
+fi
diff --git a/server/ansible/roles/xanmanning.k3s/templates/k3s.service.j2 b/server/ansible/roles/xanmanning.k3s/templates/k3s.service.j2
new file mode 100644
index 000000000..7fca63bac
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/templates/k3s.service.j2
@@ -0,0 +1,60 @@
+[Unit]
+Description=Lightweight Kubernetes
+Documentation=https://k3s.io
+{% for requires_unit in k3s_service_requires %}
+Requires={{ requires_unit }}
+{% endfor %}
+Wants=network-online.target
+{% for wants_unit in k3s_service_wants %}
+Wants={{ wants_unit }}
+{% endfor %}
+{% for before_unit in k3s_service_before %}
+Before={{ before_unit }}
+{% endfor %}
+After=network-online.target
+{% for after_unit in k3s_service_after %}
+After={{ after_unit }}
+{% endfor %}
+
+[Service]
+Type={{ 'notify' if k3s_control_node else 'exec' }}
+ExecStartPre=-/sbin/modprobe br_netfilter
+ExecStartPre=-/sbin/modprobe overlay
+{% filter regex_replace('\s+', ' ') %}
+{% filter replace('\n', ' ') %}
+ExecStart={{ k3s_install_dir }}/k3s
+{% if k3s_debug is defined and k3s_debug %}
+    --debug
+{% endif %}
+{% if k3s_control_node %}
+    server
+    {% if (k3s_etcd_datastore is defined and k3s_etcd_datastore) and (k3s_primary_control_node is not defined or not k3s_primary_control_node) and k3s_controller_list | length > 1 %}
+        --server https://{{ k3s_registration_address }}:{{ k3s_control_plane_port | default(6443) }}
+        --token-file {{ k3s_token_location }}
+    {% endif %}
+    {% if k3s_server is defined %}
+        --config {{ k3s_config_file }}
+    {% endif %}
+{% else %}
+    agent
+    --server https://{{ k3s_registration_address }}:{{ k3s_control_plane_port | default(6443) }}
+    --token-file {{ k3s_token_location }}
+    {% if k3s_agent is defined %}
+        --config {{ k3s_config_file }}
+    {% endif %}
+{% endif %}
+{% endfilter %}
+{% endfilter %}
+
+KillMode=process
+Delegate=yes
+LimitNOFILE=1048576
+LimitNPROC=infinity
+LimitCORE=infinity
+TasksMax=infinity
+TimeoutStartSec=0
+Restart=always
+RestartSec=5s
+
+[Install]
+WantedBy=multi-user.target
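The unit template above takes extra dependency ordering from k3s_service_requires, k3s_service_wants, k3s_service_before and k3s_service_after. For example, to start k3s only after an iSCSI daemon (the unit name here is illustrative, not something this patch configures), one might set:

    k3s_service_wants:
      - iscsid.service
    k3s_service_after:
      - iscsid.service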
diff --git a/server/ansible/roles/xanmanning.k3s/vars/main.yml b/server/ansible/roles/xanmanning.k3s/vars/main.yml
new file mode 100644
index 000000000..dfb4c2e15
--- /dev/null
+++ b/server/ansible/roles/xanmanning.k3s/vars/main.yml
@@ -0,0 +1,145 @@
+---
+
+# Minimum supported versions
+k3s_min_version: 1.19.3
+k3s_ansible_min_version: 2.9.16
+
+# Valid states for this role
+k3s_valid_states:
+  - installed
+  - started
+  - stopped
+  - restarted
+  - downloaded
+  - uninstalled
+  - validated
+
+# Map the architecture reported by Ansible fact gathering to a release name and suffix on GitHub.
+k3s_arch_lookup:
+  amd64:
+    arch: amd64
+    suffix: ""
+  x86_64:
+    arch: amd64
+    suffix: ""
+  arm64:
+    arch: arm64
+    suffix: "-arm64"
+  aarch64:
+    arch: arm64
+    suffix: "-arm64"
+  arm:
+    arch: arm
+    suffix: "-armhf"
+  arm6l:
+    arch: arm
+    suffix: "-armhf"
+  armv6l:
+    arch: arm
+    suffix: "-armhf"
+  arm7:
+    arch: arm
+    suffix: "-armhf"
+  armv7l:
+    arch: arm
+    suffix: "-armhf"
+  armhf:
+    arch: arm
+    suffix: "-armhf"
+
+# Always default to the stable channel; this will change with k3s_release_version
+k3s_release_channel: stable
+
+# K3s updates API
+k3s_api_releases: https://update.k3s.io/v1-release/channels
+# Download location for releases
+k3s_github_download_url: "{{ k3s_github_url }}/releases/download"
+
+# Generate a runtime config dictionary for validation
+k3s_runtime_config: "{{ (k3s_server | default({})) | combine(k3s_agent | default({})) }}"
+
+# Empty array for counting the number of control plane nodes
+k3s_controller_list: []
+
+# Control plane port default
+k3s_control_plane_port: 6443
+
+# Default to the "system" systemd context; this will be "user" when running rootless
+k3s_systemd_context: system
+
+# Directory for systemd unit files to be installed. As this role doesn't use package
+# management, this should live in /etc/systemd, not /lib/systemd
+k3s_systemd_unit_dir: "/etc/systemd/{{ k3s_systemd_context }}"
+
+# Data directory location for k3s
+k3s_data_dir: "{{ k3s_runtime_config['data-dir'] | default('/var/lib/rancher/k3s') }}"
+
+# Config directory location for k3s
+k3s_config_dir: "{{ k3s_config_file | dirname }}"
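Worth noting about the k3s_runtime_config merge above: Jinja's combine filter gives precedence to the right-hand dictionary, so on a host that defines both variables, k3s_agent keys override k3s_server keys of the same name (the merge is non-recursive). An illustration with made-up values:

    k3s_server:
      node-label:          # replaced wholesale by the agent value below
        - "tier=control"
    k3s_agent:
      node-label:
        - "storage=ssd"    # k3s_runtime_config['node-label'] resolves to this list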
+
+# Directory for gathering the k3s token for clustering. I don't see this changing.
+k3s_token_location: "{{ k3s_config_dir }}/cluster-token"
+
+# Path for additional Kubernetes Manifests
+# https://rancher.com/docs/k3s/latest/en/advanced/#auto-deploying-manifests
+k3s_server_manifests_dir: "{{ k3s_data_dir }}/server/manifests"
+
+# Packages that we need to check are installed
+k3s_check_packages: []
+# - name: dummy
+#   from: 1.19.2
+#   until: 1.21.0
+#   documentation: https://example.com
+
+# Directories that we need to ensure exist
+k3s_ensure_directories_exist:
+  - name: Config directory
+    path: "{{ k3s_config_dir }}"
+  - name: Systemd unit file directory
+    path: "{{ k3s_systemd_unit_dir }}"
+  - name: Data directory
+    path: "{{ k3s_data_dir }}"
+  - name: Default local storage path
+    path: "{{ k3s_runtime_config['default-local-storage-path'] | default(omit) }}"
+  - name: Private registry config file
+    path: "{{ (k3s_runtime_config['private-registry'] | default(omit)) | dirname }}"
+
+# Config items that should not appear in k3s_server or k3s_agent
+k3s_config_exclude:
+  - setting: server
+    correction: k3s_registration_address
+  - setting: cluster-init
+    correction: k3s_etcd_datastore
+  - setting: token
+    correction: k3s_control_token
+  - setting: token-file
+    correction: k3s_token_location
+
+# Config items and the versions in which they were introduced
+# k3s_config_version_check:
+#   - setting: example
+#     version: 1.19.3
+
+# Config items that should be marked as experimental
+k3s_experimental_config:
+  - setting: selinux
+    until: 1.19.4
+  - setting: rootless
+  - setting: secrets-encryption
+  - setting: agent-token
+  - setting: agent-token-file
+  - setting: cluster-reset
+    until: 1.19.5
+
+# Config items that should be marked as deprecated
+k3s_deprecated_config:
+  - setting: no-flannel
+    correction: "flannel-backend: 'none'"
+    # when: 0.10.2 # Example
+  - setting: cluster-secret
+    correction: token
+  - setting: no-deploy
+    correction: "disable: VALUE"
+  - setting: docker
+    correction: "docker: false"
+    when: 1.20.0
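To illustrate k3s_config_exclude above: a setting such as token is rejected when placed in k3s_server or k3s_agent, and must instead be supplied through the role variable named in its correction entry. A before/after sketch (the token value is obviously made up):

    # Flagged by the configuration validation:
    k3s_server:
      token: "not-a-real-token"

    # Corrected form:
    k3s_control_token: "not-a-real-token"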