Mirror of https://github.com/auricom/home-cluster.git (synced 2025-09-30 15:37:44 +02:00)
Add ansible playbooks
16 server/README.md Normal file
@@ -0,0 +1,16 @@
# Server infrastructure

These Ansible playbooks and roles prepare an Ubuntu 20.10.x OS to play nicely with Kubernetes and stand up k3s on top of the nodes.

## Commands

Commands to run can be found in my Ansible Taskfile located [here](https://github.com/onedr0p/home-cluster/blob/main/.taskfiles/ansible.yml).

e.g.

```bash
# List hosts in my Ansible inventory
task ansible:list

# Ping hosts in my Ansible inventory
task ansible:ping
```

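If Task isn't available, roughly equivalent raw commands are the following — a sketch, assuming they are run from `server/ansible/` so that `ansible.cfg` resolves the inventory path:

```bash
# List hosts in the Ansible inventory
ansible all --list-hosts

# Ping hosts in the Ansible inventory
ansible all -m ping
```
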
53 server/ansible/ansible.cfg Normal file
@@ -0,0 +1,53 @@
[defaults]

#--- General settings
nocows = True
forks = 8
module_name = command
deprecation_warnings = True
executable = /bin/bash

#--- Files/Directory settings
log_path = ~/ansible.log
inventory = ./inventory
library = /usr/share/my_modules
remote_tmp = ~/.ansible/tmp
local_tmp = ~/.ansible/tmp
roles_path = ./roles
retry_files_enabled = False

#--- Fact Caching settings
fact_caching = jsonfile
fact_caching_connection = ~/.ansible/facts_cache
fact_caching_timeout = 7200

#--- SSH settings
remote_port = 22
timeout = 60
host_key_checking = False
ssh_executable = /usr/bin/ssh
private_key_file = ~/.ssh/id_rsa

force_valid_group_names = ignore

#--- Speed
callback_whitelist = ansible.posix.profile_tasks
internal_poll_interval = 0.001

[inventory]
unparsed_is_failed = true

[privilege_escalation]
become = True
become_method = sudo
become_user = root
become_ask_pass = False

[ssh_connection]
scp_if_ssh = smart
transfer_method = smart
retries = 3
timeout = 10
# ServerAliveInterval takes plain seconds; "15s" would be rejected by ssh
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o Compression=yes -o ServerAliveInterval=15
pipelining = True
control_path = %(directory)s/%%h-%%r

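A quick way to confirm this file is the one Ansible loads, and to see only the non-default settings (standard `ansible-config` usage; run from `server/ansible/`):

```bash
# Show the settings that differ from Ansible's built-in defaults
ansible-config dump --only-changed
```
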
@@ -0,0 +1,15 @@
---
# Use the Calico CNI driver
calico:
  enabled: true
  operator_manifest: "https://docs.projectcalico.org/manifests/tigera-operator.yaml"
  # Enabling BGP requires your router to be set up to handle it
  bgp:
    enabled: true
    # peer is usually your router, e.g. 192.168.1.1
    peer: 192.168.8.1
    as: 64512
    # externalIPs is the network you want services to consume;
    # this network should not exist or be defined anywhere else in your network,
    # e.g. 192.168.169.0/24
    externalIPs: 192.168.169.0/24

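Once the cluster is up with BGP enabled, peering against the router can be checked from any node — a sketch that assumes `calicoctl` has been installed there:

```bash
# Show this node's BGP peer status (requires calicoctl on the node)
sudo calicoctl node status
```
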
@@ -0,0 +1,23 @@
---
#
# Below vars are for the xanmanning.k3s role
# ...see https://github.com/PyratLabs/ansible-role-k3s#globalcluster-variables
#

# Use a specific version of k3s
k3s_release_version: "v1.20.5+k3s1"

# Install using hard links rather than symbolic links.
# If you are using the system-upgrade-controller you will need hard links,
# as the controller will not be able to follow symbolic links.
k3s_install_hard_links: true

# Escalate user privileges for all tasks.
k3s_become_for_all: true

# Use experimental features (spooky!)
k3s_use_experimental: false

# Enable debugging
k3s_debug: false

# # Enable embedded etcd
# k3s_etcd_datastore: true

@@ -0,0 +1,11 @@
---
# Configure a registry mirror, useful for having a pull-through cache
mirror_registry:
  address: "https://registry-cache.devbu.io"

# Configure private registries
private_registries:
  - address: "https://registry.devbu.io"
    username: "admin"
    password: "password"

@@ -0,0 +1,7 @@
---
# Enable rsyslog
# ...requires an rsyslog server already set up
rsyslog:
  enabled: false
  ip: 192.168.69.155
  port: 1514

@@ -0,0 +1,23 @@
---
# Enable to skip apt upgrade
skip_upgrade_packages: false
# Enable to skip removing crufty packages
skip_remove_packages: false

# Timezone for the servers
timezone: "Europe/Paris"

# # Set custom ntp servers
# ntp_servers:
#   primary:
#     - "time.cloudflare.com"
#     - "time.google.com"
#   fallback:
#     - "0.us.pool.ntp.org"
#     - "1.us.pool.ntp.org"
#     - "2.us.pool.ntp.org"
#     - "3.us.pool.ntp.org"

# Additional ssh public keys to add to the nodes
ssh_authorized_keys:
  - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL+GMHgvbtf6f7xUMAQR+vZFfD/mIIfIDNX5iP8tDRXZ claude@claude-thinkpad-fedora"

@@ -0,0 +1,26 @@
---
# https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/
# https://github.com/PyratLabs/ansible-role-k3s#server-control-plane-configuration

# Define the hosts in this group as control plane nodes
k3s_control_node: true

# k3s settings for all control-plane nodes
k3s_server:
  node-ip: "{{ ansible_host }}"
  docker: false
  flannel-backend: 'none'  # This needs to be in quotes
  disable:
    - flannel
    - traefik
    - servicelb
    - metrics-server
    - local-storage
  disable-network-policy: true
  disable-cloud-controller: true
  write-kubeconfig-mode: "644"
  # Network CIDR to use for pod IPs
  cluster-cidr: "10.69.0.0/16"
  # Network CIDR to use for service IPs
  service-cidr: "10.96.0.0/16"

@@ -0,0 +1,10 @@
---
# https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/
# https://github.com/PyratLabs/ansible-role-k3s#agent-worker-configuration

# Don't define the hosts in this group as control plane nodes
k3s_control_node: false

# k3s settings for all worker nodes
k3s_agent:
  node-ip: "{{ ansible_host }}"

@@ -0,0 +1,9 @@
---
# IP address of node
ansible_host: "192.168.9.100"

# Ansible user to ssh into servers with
ansible_user: "ubuntu"
# ansible_ssh_pass: "ubuntu"
# ansible_ssh_common_args: "-o UserKnownHostsFile=/dev/null"
# ansible_become_pass: "ubuntu"

@@ -0,0 +1,9 @@
---
# IP address of node
ansible_host: "192.168.9.105"

# Ansible user to ssh into servers with
ansible_user: "ubuntu"
# ansible_ssh_pass: "ubuntu"
# ansible_ssh_common_args: "-o UserKnownHostsFile=/dev/null"
# ansible_become_pass: "ubuntu"

@@ -0,0 +1,9 @@
---
# IP address of node
ansible_host: "192.168.9.106"

# Ansible user to ssh into servers with
ansible_user: "ubuntu"
# ansible_ssh_pass: "ubuntu"
# ansible_ssh_common_args: "-o UserKnownHostsFile=/dev/null"
# ansible_become_pass: "ubuntu"

@@ -0,0 +1,10 @@
---
# IP address of node
ansible_host: "192.168.9.107"

# Ansible user to ssh into servers with
ansible_user: "ubuntu"
# ansible_ssh_pass: "ubuntu"
# ansible_ssh_common_args: "-o UserKnownHostsFile=/dev/null"
# ansible_become_pass: "ubuntu"
disks: ""

17 server/ansible/inventory/home-cluster/hosts.yml Normal file
@@ -0,0 +1,17 @@
---
all:
  children:
    # Control plane group; the playbooks target the 'server-nodes' group name,
    # and hosts should match the filenames in 'host_vars'
    server-nodes:
      hosts:
        k3s-server:
    # Worker group; the playbooks target the 'worker-nodes' group name,
    # and hosts should match the filenames in 'host_vars'
    worker-nodes:
      hosts:
        k3s-worker1:
        k3s-worker2:
        k3s-worker3:
    # Storage group, these are my NAS devices
    # hosts should match the filenames in 'host_vars'

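To confirm the inventory parses and the group/host layout matches the host_vars filenames, Ansible's standard inventory CLI can render it as a tree:

```bash
# Print the inventory as a tree of groups and hosts
ansible-inventory -i inventory/home-cluster/hosts.yml --graph
```
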
14 server/ansible/playbooks/k3s/install.yml Normal file
@@ -0,0 +1,14 @@
---
- hosts:
    - server-nodes
    - worker-nodes
  become: true
  gather_facts: true
  any_errors_fatal: true
  pre_tasks:
    - name: Pausing for 5 seconds...
      pause:
        seconds: 5
  roles:
    - xanmanning.k3s
    - k3s

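Outside the Taskfile, a plausible direct invocation of this playbook (paths as in this commit, run from `server/ansible/`):

```bash
# Stand up k3s across the server and worker groups
ansible-playbook -i inventory/home-cluster/hosts.yml playbooks/k3s/install.yml
```
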
32 server/ansible/playbooks/k3s/nuke.yml Normal file
@@ -0,0 +1,32 @@
---
- hosts:
    - server-nodes
    - worker-nodes
  become: true
  gather_facts: true
  any_errors_fatal: true
  pre_tasks:
    - name: Pausing for 5 seconds...
      pause:
        seconds: 5
  tasks:
    - name: kill k3s
      ansible.builtin.command: /usr/local/bin/k3s-killall.sh
    - name: uninstall k3s
      ansible.builtin.command:
        cmd: /usr/local/bin/k3s-uninstall.sh
        removes: /usr/local/bin/k3s-uninstall.sh
    - name: uninstall k3s agent
      ansible.builtin.command:
        cmd: /usr/local/bin/k3s-agent-uninstall.sh
        removes: /usr/local/bin/k3s-agent-uninstall.sh
    - name: gather list of CNI files to delete
      find:
        paths: /etc/cni/net.d
        patterns: "*"
      register: files_to_delete
    - name: delete CNI files
      ansible.builtin.file:
        path: "{{ item.path }}"
        state: absent
      loop: "{{ files_to_delete.files }}"

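Because this playbook is destructive, Ansible's standard `--limit` flag is useful for constraining it to a single host — a sketch:

```bash
# Tear down k3s on one worker only, rather than the whole cluster
ansible-playbook -i inventory/home-cluster/hosts.yml playbooks/k3s/nuke.yml --limit k3s-worker1
```
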
13 server/ansible/playbooks/k3s/upgrade.yml Normal file
@@ -0,0 +1,13 @@
---
- hosts:
    - server-nodes
    - worker-nodes
  become: true
  gather_facts: true
  any_errors_fatal: true
  pre_tasks:
    - name: Pausing for 5 seconds...
      pause:
        seconds: 5
  roles:
    - xanmanning.k3s

36 server/ansible/playbooks/power-outage/shutdown.yml Normal file
@@ -0,0 +1,36 @@
---
- hosts:
    - server-nodes
    - worker-nodes
  become: true
  gather_facts: true
  tasks:
    #
    # Turn off control-nodes and generic-nodes devices in 2 minutes
    #

    - name: turn off control-nodes
      # ansible.builtin.command: /sbin/shutdown -h 2
      ansible.builtin.command: /sbin/shutdown --help
      when: "'control-nodes' in group_names"

    - name: turn off generic-nodes
      # ansible.builtin.command: /sbin/shutdown -h 2
      ansible.builtin.command: /sbin/shutdown --help
      when: "'generic-nodes' in group_names"

    #
    # Turn off NAS devices in 5 minutes
    #

    # QNAP devices do not have /sbin/shutdown and
    # instead use busybox /sbin/poweroff
    - name: turn off storage node nas-rocinante
      # ansible.builtin.command: /sbin/poweroff -d 300
      ansible.builtin.command: /sbin/poweroff --help
      when: inventory_hostname == "nas-rocinante"

    - name: turn off storage node nas-serenity
      # ansible.builtin.command: /sbin/shutdown -h 5
      ansible.builtin.command: /sbin/shutdown --help
      when: inventory_hostname == "nas-serenity"

13 server/ansible/playbooks/ubuntu/prepare.yml Normal file
@@ -0,0 +1,13 @@
---
- hosts:
    - server-nodes
    - worker-nodes
  become: true
  gather_facts: true
  any_errors_fatal: true
  pre_tasks:
    - name: Pausing for 5 seconds...
      pause:
        seconds: 5
  roles:
    - ubuntu

22 server/ansible/playbooks/ubuntu/upgrade.yml Normal file
@@ -0,0 +1,22 @@
---
- hosts:
    - server-nodes
    - worker-nodes
  become: true
  gather_facts: true
  any_errors_fatal: true
  pre_tasks:
    - name: Pausing for 5 seconds...
      pause:
        seconds: 5
  tasks:
    - name: upgrade
      ansible.builtin.apt:
        upgrade: full
        update_cache: true
        cache_valid_time: 3600
        autoclean: true
        autoremove: true
      register: apt_upgrade
      retries: 5
      until: apt_upgrade is success

1 server/ansible/requirements.txt Normal file
@@ -0,0 +1 @@
jmespath==0.10.0

6 server/ansible/requirements.yml Normal file
@@ -0,0 +1,6 @@
---
roles:
  - src: xanmanning.k3s
    version: v2.8.0
collections:
  - name: community.general

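Both requirements files install with standard tooling:

```bash
# Python dependency for the Ansible controller
pip3 install -r requirements.txt

# Galaxy role and collection used by the playbooks
ansible-galaxy role install -r requirements.yml
ansible-galaxy collection install -r requirements.yml
```
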
38 server/ansible/roles/k3s/tasks/calico.yml Normal file
@@ -0,0 +1,38 @@
---
- name: cluster | calico | deploy tigera operator to k3s manifest directory
  become: true
  # run_once: true
  ansible.builtin.get_url:
    url: "{{ calico.operator_manifest }}"
    dest: "{{ k3s_server_manifests_dir }}/tigera-operator.yaml"
    mode: 0644

- name: cluster | calico | deploy configuration to k3s manifest directory
  become: true
  # run_once: true
  ansible.builtin.template:
    src: "calico-installation.yaml.j2"
    dest: "{{ k3s_server_manifests_dir }}/calico-installation.yaml"
    mode: 0644

- name: cluster | calico | deploy BGP peer to k3s manifest directory
  become: true
  # run_once: true
  ansible.builtin.template:
    src: "calico-bgppeer.yaml.j2"
    dest: "{{ k3s_server_manifests_dir }}/calico-bgppeer.yaml"
    mode: 0644
  when:
    - calico.bgp.enabled is defined
    - calico.bgp.enabled

- name: cluster | calico | deploy BGP configuration to k3s manifest directory
  become: true
  # run_once: true
  ansible.builtin.template:
    src: "calico-bgpconfiguration.yaml.j2"
    dest: "{{ k3s_server_manifests_dir }}/calico-bgpconfiguration.yaml"
    mode: 0644
  when:
    - calico.bgp.enabled is defined
    - calico.bgp.enabled

20 server/ansible/roles/k3s/tasks/kubeconfig.yml Normal file
@@ -0,0 +1,20 @@
---
- name: cluster | kubeconfig | copy config file to /tmp
  become: true
  run_once: true
  ansible.builtin.fetch:
    src: "/etc/rancher/k3s/k3s.yaml"
    dest: "/tmp/kubeconfig"
    flat: true
  when:
    - k3s_control_node is defined
    - k3s_control_node

- name: cluster | kubeconfig | update kubeconfig with the right IPv4 address
  delegate_to: localhost
  become: false
  run_once: true
  ansible.builtin.replace:
    path: "/tmp/kubeconfig"
    regexp: "https://127.0.0.1:6443"
    replace: "https://{{ k3s_registration_address }}:6443"

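After these tasks run, the fetched kubeconfig can be exercised directly from the Ansible controller — a sketch assuming `kubectl` is installed there:

```bash
# Point kubectl at the fetched kubeconfig and confirm the nodes registered
KUBECONFIG=/tmp/kubeconfig kubectl get nodes -o wide
```
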
21 server/ansible/roles/k3s/tasks/main.yml Normal file
@@ -0,0 +1,21 @@
---
- include: kubeconfig.yml
  tags:
    - kubeconfig

# - include: registry.yml
#   when: mirror_registry is defined
#     or (private_registries is defined
#     and private_registries|length > 0)
#   tags:
#     - registry

- include: calico.yml
  when:
    # - "'k8s-control-node-a' in inventory_hostname"
    - k3s_control_node is defined
    - k3s_control_node
    - calico.enabled is defined
    - calico.enabled
  tags:
    - calico

21 server/ansible/roles/k3s/tasks/registry.yml Normal file
@@ -0,0 +1,21 @@
---
- name: cluster-registry | create /etc/rancher/k3s
  become: true
  ansible.builtin.file:
    path: "/etc/rancher/k3s"
    state: directory
    mode: 0755  # directories need the execute bit to be traversable

- name: cluster-registry | configure mirrors and custom registries
  become: true
  ansible.builtin.template:
    src: "registries.yaml.j2"
    dest: "/etc/rancher/k3s/registries.yaml"
    mode: 0644

- name: cluster-registry | restart k3s systemd service
  ansible.builtin.systemd:
    name: k3s.service
    daemon_reload: true
    enabled: true
    state: restarted

@@ -0,0 +1,8 @@
---
apiVersion: crd.projectcalico.org/v1
kind: BGPConfiguration
metadata:
  name: default
spec:
  serviceExternalIPs:
    - cidr: {{ calico.bgp.externalIPs }}

@@ -0,0 +1,8 @@
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  name: global
spec:
  peerIP: {{ calico.bgp.peer }}
  asNumber: {{ calico.bgp.as }}

@@ -0,0 +1,19 @@
#jinja2:lstrip_blocks: True
---
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
      - blockSize: 26
        cidr: "{{ k3s_server['cluster-cidr'] }}"
{% if calico.bgp.enabled is defined and calico.bgp.enabled %}
        encapsulation: None
{% else %}
        encapsulation: VXLANCrossSubnet
{% endif %}
        natOutgoing: Enabled
        nodeSelector: all()

20 server/ansible/roles/k3s/templates/registries.yaml.j2 Normal file
@@ -0,0 +1,20 @@
#jinja2:lstrip_blocks: True
---
{% if mirror_registry is defined %}
mirrors:
  "docker.io":
    endpoint:
      - "{{ mirror_registry.address }}"
  "*":
    endpoint:
      - "{{ mirror_registry.address }}"
{% endif %}
{% if private_registries is defined and private_registries|length > 0 %}
configs:
{% for private_registry in private_registries %}
  "{{ private_registry.address }}":
    auth:
      username: "{{ private_registry.username }}"
      password: "{{ private_registry.password }}"
{% endfor %}
{% endif %}

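Given the example registry group_vars earlier in this commit, the template should render on a node roughly as follows — a hypothetical sketch of the expected output:

```bash
# Hypothetical rendered result of /etc/rancher/k3s/registries.yaml
cat /etc/rancher/k3s/registries.yaml
# mirrors:
#   "docker.io":
#     endpoint:
#       - "https://registry-cache.devbu.io"
#   "*":
#     endpoint:
#       - "https://registry-cache.devbu.io"
# configs:
#   "https://registry.devbu.io":
#     auth:
#       username: "admin"
#       password: "password"
```
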
43 server/ansible/roles/ubuntu/tasks/boot.yml Normal file
@@ -0,0 +1,43 @@
---
- name: boot | grub | check for existence of grub
  ansible.builtin.stat:
    path: /etc/default/grub
  register: grub_result

- name: boot | grub | set apparmor=0
  ansible.builtin.replace:
    path: /etc/default/grub
    regexp: '^(GRUB_CMDLINE_LINUX_DEFAULT=(?:(?![" ]{{ option | regex_escape }}=).)*)(?:[" ]{{ option | regex_escape }}=\S+)?(.*")$'
    replace: '\1 {{ option }}={{ value }}\2'
  vars:
    option: apparmor
    value: 0
  when:
    - grub_result.stat.exists

- name: boot | grub | set mitigations=off
  ansible.builtin.replace:
    path: /etc/default/grub
    regexp: '^(GRUB_CMDLINE_LINUX_DEFAULT=(?:(?![" ]{{ option | regex_escape }}=).)*)(?:[" ]{{ option | regex_escape }}=\S+)?(.*")$'
    replace: '\1 {{ option }}={{ value }}\2'
  vars:
    option: mitigations
    value: "off"
  when:
    - grub_result.stat.exists

- name: boot | grub | set pti=off
  ansible.builtin.replace:
    path: /etc/default/grub
    regexp: '^(GRUB_CMDLINE_LINUX_DEFAULT=(?:(?![" ]{{ option | regex_escape }}=).)*)(?:[" ]{{ option | regex_escape }}=\S+)?(.*")$'
    replace: '\1 {{ option }}={{ value }}\2'
  vars:
    option: pti
    value: "off"
  when:
    - grub_result.stat.exists

- name: boot | grub | run grub-mkconfig
  ansible.builtin.command: grub-mkconfig -o /boot/grub/grub.cfg
  when:
    - grub_result.stat.exists

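The replace regex in these tasks appends `option=value` to `GRUB_CMDLINE_LINUX_DEFAULT` only when the option isn't already present, which keeps repeat runs idempotent. A hypothetical before/after, plus how to verify on a node:

```bash
# Before: GRUB_CMDLINE_LINUX_DEFAULT="quiet splash"
# After:  GRUB_CMDLINE_LINUX_DEFAULT="quiet splash apparmor=0 mitigations=off pti=off"
grep GRUB_CMDLINE_LINUX_DEFAULT /etc/default/grub

# The new options only take effect on the running kernel after a reboot
cat /proc/cmdline
```
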
19 server/ansible/roles/ubuntu/tasks/disks.yml Normal file
@@ -0,0 +1,19 @@
---
- name: disks | create directories
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    mode: "0755"  # directories need the execute bit to be traversable
  when: disks is defined
  loop:
    - /mnt/ssd1
    - /mnt/ssd1/qbittorrent

- name: disks | mount disks
  ansible.posix.mount:
    path: "/mnt/ssd1"
    src: "UUID=558ddf99-61e8-4ac1-9819-adff7c8cc560"
    fstype: ext4
    opts: defaults
    state: present
  when: disks is defined

20 server/ansible/roles/ubuntu/tasks/filesystem.yml Normal file
@@ -0,0 +1,20 @@
---
- name: filesystem | sysctl | update max_user_watches
  ansible.posix.sysctl:
    name: fs.inotify.max_user_watches
    value: "524288"
    state: present
    sysctl_file: /etc/sysctl.d/98-kubernetes-fs.conf

- name: filesystem | swap | disable at runtime
  ansible.builtin.command: swapoff -a
  when: ansible_swaptotal_mb > 0

- name: filesystem | swap | disable on boot
  ansible.posix.mount:
    name: "{{ item }}"
    fstype: swap
    state: absent
  loop:
    - swap
    - none

6 server/ansible/roles/ubuntu/tasks/host.yml Normal file
@@ -0,0 +1,6 @@
---
- name: host | hostname | update inventory hostname
  ansible.builtin.hostname:
    name: "{{ inventory_hostname }}"
  when:
    - ansible_hostname != inventory_hostname

25 server/ansible/roles/ubuntu/tasks/kernel.yml Normal file
@@ -0,0 +1,25 @@
---
- name: kernel | modules | enable at runtime
  community.general.modprobe:
    name: "{{ item }}"
    state: present
  loop:
    - br_netfilter
    - nf_conntrack
    - overlay
    - rbd
    - ip_vs
    - iscsi_tcp

- name: kernel | modules | enable on boot
  ansible.builtin.copy:
    mode: 0644
    content: "{{ item }}"
    dest: "/etc/modules-load.d/{{ item }}.conf"
  loop:
    - br_netfilter
    - nf_conntrack
    - overlay
    - rbd
    - ip_vs
    - iscsi_tcp

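After a run, module state on a node can be spot-checked with standard tools:

```bash
# Confirm the modules are loaded now...
lsmod | grep -E 'br_netfilter|overlay|ip_vs'

# ...and will be loaded again on boot
ls /etc/modules-load.d/
```
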
44 server/ansible/roles/ubuntu/tasks/locale.yml Normal file
@@ -0,0 +1,44 @@
---
- name: locale | set timezone
  community.general.timezone:
    name: "{{ timezone | default('Europe/Paris') }}"

- name: locale | copy timesyncd config
  ansible.builtin.copy:
    mode: 0644
    content: |
      [Time]
      NTP={{ ntp_servers.primary | default("") | join(" ") }}
      FallbackNTP={{ ntp_servers.fallback | join(" ") }}
    dest: /etc/systemd/timesyncd.conf
  when:
    - ntp_servers.primary is defined
    - ntp_servers.primary is iterable
    - ntp_servers.primary | length > 0
    - ntp_servers.fallback is defined
    - ntp_servers.fallback is iterable
    - ntp_servers.fallback | length > 0

- name: locale | start systemd service
  ansible.builtin.systemd:
    name: systemd-timesyncd
    enabled: true
    state: started

- name: locale | restart systemd service
  ansible.builtin.systemd:
    name: systemd-timesyncd
    daemon_reload: true
    enabled: true
    state: restarted

- name: locale | run timedatectl status
  ansible.builtin.command: /usr/bin/timedatectl show
  changed_when: false
  check_mode: false
  register: timedatectl_result

- name: locale | enable ntp
  ansible.builtin.command: /usr/bin/timedatectl set-ntp true
  when:
    - "'NTP=no' in timedatectl_result.stdout"

51 server/ansible/roles/ubuntu/tasks/main.yml Normal file
@@ -0,0 +1,51 @@
---
- include: host.yml
  tags:
    - host

- include: locale.yml
  tags:
    - locale

- include: packages.yml
  tags:
    - packages

# - include: power-button.yml
#   tags:
#     - power-button

- include: kernel.yml
  tags:
    - kernel

- include: boot.yml
  tags:
    - boot

- include: network.yml
  tags:
    - network

- include: filesystem.yml
  tags:
    - filesystem

- include: unattended-upgrades.yml
  tags:
    - unattended-upgrades

- include: user.yml
  tags:
    - user

- include: rsyslog.yml
  when:
    - rsyslog.enabled is defined
    - rsyslog.enabled
  tags:
    - rsyslog

- include: disks.yml
  tags:
    - disks

23 server/ansible/roles/ubuntu/tasks/network.yml Normal file
@@ -0,0 +1,23 @@
---
- name: network | check for bridge-nf-call-iptables
  ansible.builtin.stat:
    path: /proc/sys/net/bridge/bridge-nf-call-iptables
  register: bridge_nf_call_iptables_result

- name: network | sysctl | set config
  ansible.builtin.blockinfile:
    path: /etc/sysctl.d/99-kubernetes-cri.conf
    mode: 0644
    create: true
    block: |
      net.ipv4.ip_forward = 1
      net.bridge.bridge-nf-call-iptables = 1
  when:
    - bridge_nf_call_iptables_result.stat.exists
  register: sysctl_network

- name: network | sysctl | reload
  ansible.builtin.shell: sysctl -p /etc/sysctl.d/99-kubernetes-cri.conf
  when:
    - sysctl_network.changed
    - bridge_nf_call_iptables_result.stat.exists

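A quick way to verify both settings are live on a node (standard sysctl usage):

```bash
# Both should print "= 1" after the tasks run
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables
```
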
94 server/ansible/roles/ubuntu/tasks/packages.yml Normal file
@@ -0,0 +1,94 @@
---
- name: packages | disable recommends
  ansible.builtin.blockinfile:
    path: /etc/apt/apt.conf.d/02norecommends
    mode: 0644
    create: true
    block: |
      APT::Install-Recommends "false";
      APT::Install-Suggests "false";
      APT::Get::Install-Recommends "false";
      APT::Get::Install-Suggests "false";

- name: packages | upgrade all packages
  ansible.builtin.apt:
    upgrade: full
    update_cache: true
    cache_valid_time: 3600
    autoclean: true
    autoremove: true
  register: apt_upgrade
  retries: 5
  until: apt_upgrade is success
  when:
    - (skip_upgrade_packages is not defined or (skip_upgrade_packages is defined and not skip_upgrade_packages))

- name: packages | install common
  ansible.builtin.apt:
    name: "{{ packages.apt_install }}"
    install_recommends: false
    update_cache: true
    cache_valid_time: 3600
    autoclean: true
    autoremove: true
  register: apt_install_common
  retries: 5
  until: apt_install_common is success
  when:
    - packages.apt_install is defined
    - packages.apt_install is iterable
    - packages.apt_install | length > 0

- name: packages | remove crufty packages
  block:
    - name: packages | remove crufty packages | gather installed packages
      ansible.builtin.package_facts:
        manager: auto
      when:
        - "'snapd' in packages.apt_remove"
    - name: packages | remove crufty packages | check if snap is installed
      ansible.builtin.debug:
        msg: "snapd is installed"
      register: snapd_check
      when:
        - "'snapd' in packages.apt_remove"
        - "'snapd' in ansible_facts.packages"
    - name: packages | remove crufty packages | remove snap packages
      ansible.builtin.command: snap remove {{ item }}
      loop:
        - lxd
        - core18
        - snapd
      when:
        - "'snapd' in packages.apt_remove"
        - "'snapd' in ansible_facts.packages"
        - snapd_check.failed is defined
    - name: packages | remove crufty packages | remove packages
      ansible.builtin.apt:
        name: "{{ packages.apt_remove }}"
        state: absent
        autoremove: true
    - name: packages | remove crufty packages | remove crufty files
      ansible.builtin.file:
        state: absent
        path: "{{ item }}"
      loop:
        - "/home/{{ ansible_user }}/.snap"
        - "/snap"
        - "/var/snap"
        - "/var/lib/snapd"
        - "/var/cache/snapd"
        - "/usr/lib/snapd"
        - "/etc/cloud"
        - "/var/lib/cloud"
      when:
        - "'snapd' in packages.apt_remove"
        - "'cloud-init' in packages.apt_remove"
  when:
    - packages.apt_remove is defined
    - packages.apt_remove is iterable
    - packages.apt_remove | length > 0
    - (skip_remove_packages is not defined or (skip_remove_packages is defined and not skip_remove_packages))

15 server/ansible/roles/ubuntu/tasks/power-button.yml Normal file
@@ -0,0 +1,15 @@
---
- name: power-button | disable single power button press shutdown
  ansible.builtin.lineinfile:
    path: /etc/systemd/logind.conf
    regexp: "{{ item.setting }}"
    line: "{{ item.setting }}={{ item.value }}"
  loop:
    - { setting: HandlePowerKey, value: ignore }

- name: power-button | restart logind systemd service
  ansible.builtin.systemd:
    name: systemd-logind.service
    daemon_reload: true
    enabled: true
    state: restarted

server/ansible/roles/ubuntu/tasks/rsyslog.yml
Normal file
19
server/ansible/roles/ubuntu/tasks/rsyslog.yml
Normal file
@@ -0,0 +1,19 @@
|
||||
---
|
||||
- name: rsyslog
|
||||
block:
|
||||
- name: rsyslog | copy promtail configuration
|
||||
ansible.builtin.template:
|
||||
src: "rsyslog-50-promtail.conf.j2"
|
||||
dest: "/etc/rsyslog.d/50-promtail.conf"
|
||||
mode: 0644
|
||||
- name: rsyslog | start systemd service
|
||||
ansible.builtin.systemd:
|
||||
name: rsyslog
|
||||
enabled: true
|
||||
state: started
|
||||
- name: rsyslog | restart systemd service
|
||||
ansible.builtin.systemd:
|
||||
name: rsyslog.service
|
||||
daemon_reload: true
|
||||
enabled: true
|
||||
state: restarted
|
37
server/ansible/roles/ubuntu/tasks/unattended-upgrades.yml
Normal file
37
server/ansible/roles/ubuntu/tasks/unattended-upgrades.yml
Normal file
@@ -0,0 +1,37 @@
|
||||
---
|
||||
- name: unattended-upgrades | copy 20auto-upgrades config
|
||||
ansible.builtin.blockinfile:
|
||||
path: /etc/apt/apt.conf.d/20auto-upgrades
|
||||
mode: 0644
|
||||
create: true
|
||||
block: |
|
||||
APT::Periodic::Update-Package-Lists "14";
|
||||
APT::Periodic::Download-Upgradeable-Packages "14";
|
||||
APT::Periodic::AutocleanInterval "7";
|
||||
APT::Periodic::Unattended-Upgrade "1";
|
||||
|
||||
- name: unattended-upgrades | copy 50unattended-upgrades config
|
||||
ansible.builtin.blockinfile:
|
||||
path: /etc/apt/apt.conf.d/50unattended-upgrades
|
||||
mode: 0644
|
||||
create: true
|
||||
block: |
|
||||
Unattended-Upgrade::Automatic-Reboot "false";
|
||||
Unattended-Upgrade::Remove-Unused-Dependencies "true";
|
||||
Unattended-Upgrade::Allowed-Origins {
|
||||
"${distro_id}:${distro_codename}";
|
||||
"${distro_id} ${distro_codename}-security";
|
||||
};
|
||||
|
||||
- name: unattended-upgrades | start systemd service
|
||||
ansible.builtin.systemd:
|
||||
name: unattended-upgrades
|
||||
enabled: true
|
||||
state: started
|
||||
|
||||
- name: unattended-upgrades | restart systemd service
|
||||
ansible.builtin.service:
|
||||
name: unattended-upgrades.service
|
||||
daemon_reload: true
|
||||
enabled: true
|
||||
state: restarted
|
35
server/ansible/roles/ubuntu/tasks/user.yml
Normal file
35
server/ansible/roles/ubuntu/tasks/user.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
- name: user | get home directory
|
||||
ansible.builtin.shell: "echo $HOME"
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
register: user_home
|
||||
|
||||
- name: user | add to sudoers
|
||||
ansible.builtin.copy:
|
||||
content: "{{ ansible_user }} ALL=(ALL:ALL) NOPASSWD:ALL"
|
||||
dest: "/etc/sudoers.d/{{ ansible_user }}_nopasswd"
|
||||
mode: "0440"
|
||||
|
||||
- name: user | add additional SSH public keys
|
||||
ansible.posix.authorized_key:
|
||||
user: "{{ ansible_user }}"
|
||||
key: "{{ item }}"
|
||||
loop: "{{ ssh_authorized_keys }}"
|
||||
when:
|
||||
- ssh_authorized_keys is defined
|
||||
- ssh_authorized_keys is iterable
|
||||
- ssh_authorized_keys | length > 0
|
||||
|
||||
- name: user | check if hushlogin exists
|
||||
ansible.builtin.stat:
|
||||
path: "/{{ user_home.stdout }}/.hushlogin"
|
||||
register: hushlogin_result
|
||||
|
||||
- name: user | silence the login prompt
|
||||
ansible.builtin.file:
|
||||
dest: "/{{ user_home.stdout }}/.hushlogin"
|
||||
state: touch
|
||||
owner: "{{ ansible_user }}"
|
||||
mode: "0775"
|
||||
when: not hushlogin_result.stat.exists
|
@@ -0,0 +1,4 @@
module(load="omprog")
module(load="mmutf8fix")
action(type="mmutf8fix" replacementChar="?")
action(type="omfwd" protocol="tcp" target="{{ rsyslog.ip }}" port="{{ rsyslog.port }}" Template="RSYSLOG_SyslogProtocol23Format" TCP_Framing="octet-counted" KeepAlive="on")

69 server/ansible/roles/ubuntu/vars/main.yml Normal file
@@ -0,0 +1,69 @@
---
packages:
  apt_install:
    - apt-transport-https
    - arptables
    - ca-certificates
    - curl
    # - dnsutils
    - ebtables
    # - ethtool
    # - git
    # - gnupg-agent
    # - gnupg2
    # - haveged
    - hdparm
    - htop
    # - iperf3
    - iputils-ping
    - ipvsadm
    # - jq
    - lvm2
    # - neofetch
    - net-tools
    # - netcat
    - nfs-common
    - nano
    # - nmap
    - ntpdate
    - open-iscsi
    # - pigz
    - psmisc
    # - python3
    # - python3-openssl
    # - python3-pip
    # - rclone
    # - rsync
    # - scsitools
    - smartmontools
    - socat
    - software-properties-common
    # - traceroute
    # - tree
    - unattended-upgrades
    - unzip
    # - vim
  apt_remove:
    - apparmor
    - apport
    - bcache-tools
    - btrfs-progs
    - byobu
    - cloud-init
    - cloud-guest-utils
    - cloud-initramfs-copymods
    - cloud-initramfs-dyn-netconf
    - friendly-recovery
    - fwupd
    - landscape-common
    - lxd-agent-loader
    - ntfs-3g
    - open-vm-tools
    - plymouth
    - plymouth-theme-ubuntu-text
    - popularity-contest
    - snapd
    - sosreport
    - tmux
    - ubuntu-advantage-tools
    - ufw

4 server/ansible/roles/xanmanning.k3s/.ansible-lint Normal file
@@ -0,0 +1,4 @@
---

skip_list:
  - role-name

55 server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
@@ -0,0 +1,55 @@
---
name: Bug report
about: Create a report to help us improve
---

<!-- Please first verify that your issue is not already reported on GitHub -->
<!-- Complete *all* sections as described. -->

### Summary

<!-- Explain the problem briefly below -->

### Issue Type

- Bug Report

### Controller Environment and Configuration

<!-- Please re-run your playbook with: `-e "pyratlabs_issue_controller_dump=true"` -->
<!-- Example: `ansible-playbook -e "pyratlabs_issue_controller_dump=true" /path/to/playbook.yml` -->
<!-- Then please copy-and-paste the contents (or attach) to this issue. -->

<!-- Please also include information about the version of the role you are using -->

```text

```

### Steps to Reproduce

<!-- Describe exactly how to reproduce the problem, using a minimal test-case -->

<!-- Paste example playbooks or commands between quotes below -->

```yaml

```

### Expected Result

<!-- Describe what you expected to happen when running the steps above -->

```text

```

### Actual Result

<!-- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->

<!-- Paste verbatim command output between quotes -->

```text

```

3 server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/config.yml vendored Normal file
@@ -0,0 +1,3 @@
---

blank_issues_enabled: true

33 server/ansible/roles/xanmanning.k3s/.github/ISSUE_TEMPLATE/feature_request.md vendored Normal file
@@ -0,0 +1,33 @@
---
name: Feature request
about: Suggest an idea for this project
---

<!-- Please first verify that your feature was not already discussed on GitHub -->
<!-- Complete *all* sections as described, this form is processed automatically -->

### Summary

<!-- Describe the new feature/improvement briefly below -->

### Issue Type

- Feature Request

### User Story

<!-- If you can, please provide a user story, if you don't know what this is don't worry, it will be refined by PyratLabs. -->
<!-- Describe who would use it, why it is needed and the benefit -->

_As a_ <!-- (Insert Persona) --> \
_I want to_ <!-- (Insert Action) --> \
_So that_ <!-- (Insert Benefit) -->

### Additional Information

<!-- Please include any relevant documentation, URLs, etc. -->
<!-- Paste example playbooks or commands between quotes below -->

```yaml

```

37 server/ansible/roles/xanmanning.k3s/.github/PULL_REQUEST_TEMPLATE.md vendored Normal file
@@ -0,0 +1,37 @@
## TITLE

### Summary

<!-- Describe the change below, including rationale and design decisions -->

<!-- HINT: Include "Fixes #nnn" if you are fixing an existing issue -->

### Issue type

<!-- Pick one below and delete the rest -->
- Bugfix
- Documentation
- Feature

### Test instructions

<!-- Please provide instructions for testing this PR -->

### Acceptance Criteria

<!-- Please list criteria required to ensure this change has been sufficiently reviewed. -->

<!-- Example ticklist:
- [ ] GitHub Actions Build passes.
- [ ] Documentation updated.
-->

### Additional Information

<!-- Include additional information to help people understand the change here -->

<!-- Paste verbatim command output below, e.g. before and after your change -->

```text

```

18 server/ansible/roles/xanmanning.k3s/.github/stale.yml vendored Normal file
@@ -0,0 +1,18 @@
---
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 60
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
# Issues with these labels will never be considered stale
exemptLabels:
  - pinned
  - security
# Label to use when marking an issue as stale
staleLabel: wontfix
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed if no further activity occurs. Thank you
  for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: false

65 server/ansible/roles/xanmanning.k3s/.github/workflows/ci.yml vendored Normal file
@@ -0,0 +1,65 @@
---

name: CI
'on':
  pull_request:
  push:
    branches:
      - master
      - main
      - v1_release
  schedule:
    - cron: "0 1 1 * *"

defaults:
  run:
    working-directory: "xanmanning.k3s"

jobs:
  molecule:
    name: Molecule
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - distro: debian10
            scenario: default
          - distro: ubuntu2004
            scenario: default
          - distro: amazonlinux2
            scenario: default
          - distro: centos7
            scenario: default
          - distro: ubuntu1804
            scenario: default
          - distro: fedora31
            scenario: nodeploy
          - distro: fedora29
            scenario: highavailabilitydb
          - distro: fedora30
            scenario: autodeploy
          - distro: debian9
            scenario: highavailabilityetcd
          - distro: centos8
            scenario: highavailabilityetcd

    steps:
      - name: Checkout codebase
        uses: actions/checkout@v2
        with:
          path: "xanmanning.k3s"

      - name: Set up Python 3
        uses: actions/setup-python@v2
        with:
          python-version: "3.x"

      - name: Install test dependencies
        run: pip3 install -r molecule/requirements.txt

      - name: Run Molecule tests
        run: molecule test --scenario-name "${{ matrix.scenario }}"
        env:
          PY_COLORS: '1'
          ANSIBLE_FORCE_COLOR: '1'
          MOLECULE_DISTRO: ${{ matrix.distro }}

32 server/ansible/roles/xanmanning.k3s/.github/workflows/release.yml vendored Normal file
@@ -0,0 +1,32 @@
---

name: Release
'on':
  push:
    tags:
      - '*'

defaults:
  run:
    working-directory: "xanmanning.k3s"

jobs:
  release:
    name: Release
    runs-on: ubuntu-latest
    steps:
      - name: Checkout codebase
        uses: actions/checkout@v2
        with:
          path: "xanmanning.k3s"

      - name: Set up Python 3
        uses: actions/setup-python@v2
        with:
          python-version: "3.x"

      - name: Install Ansible
        run: pip3 install -r requirements.txt

      - name: Trigger a new import on Galaxy
        run: ansible-galaxy role import --api-key ${{ secrets.GALAXY_API_KEY }} $(echo ${{ github.repository }} | cut -d/ -f1) $(echo ${{ github.repository }} | cut -d/ -f2)

12 server/ansible/roles/xanmanning.k3s/.gitignore vendored Normal file
@@ -0,0 +1,12 @@
.vagrant
*.retry
VAULT_PASSWORD
VAULT_PASS
.vault_pass
.vault_pass.asc
vagramt/fetch
vagrant/ubuntu-*.log
__pycache__
ansible.cfg
pyratlabs-issue-dump.txt
.cache

33 server/ansible/roles/xanmanning.k3s/.yamllint Normal file
@@ -0,0 +1,33 @@
---
# Based on ansible-lint config
extends: default

rules:
  braces:
    max-spaces-inside: 1
    level: error
  brackets:
    max-spaces-inside: 1
    level: error
  colons:
    max-spaces-after: -1
    level: error
  commas:
    max-spaces-after: -1
    level: error
  comments: disable
  comments-indentation: disable
  document-start: disable
  empty-lines:
    max: 3
    level: error
  hyphens:
    level: error
  indentation: disable
  key-duplicates: enable
  line-length: disable
  new-line-at-end-of-file: disable
  new-lines:
    type: unix
  trailing-spaces: disable
  truthy: disable

304 server/ansible/roles/xanmanning.k3s/CHANGELOG.md Normal file
@@ -0,0 +1,304 @@
# Change Log

<!--
## DATE, vx.x.x

### Notable changes

### Breaking changes

### Known issues

### Contributors

---
-->

## 2021-03-14, v2.8.0

Happy π day!

### Notable changes

- Updated GitHub Actions, resolved linting errors.
- Renamed `k3s_control_node_address` -> `k3s_registration_address`

### Breaking changes

- A task has been added to rename `k3s_control_node_address` to
  `k3s_registration_address` for any users still using this variable name,
  however this might still break something.

---

## 2021-02-28, v2.7.1

### Notable changes

- Bugfix, missing become on cluster token check.

---

## 2021-02-27, v2.7.0

### Notable changes

- Cluster init checks added.
- Tidy up of tasks, failed checks.
- Possible fix for #93 - force draining of nodes added.

---

## 2021-02-27, v2.6.1

### Notable changes

- Bugfix: Templating error for single control plane nodes using Etcd.
- Bugfix: a number of typos fixed.

---

## 2021-02-16, v2.6.0

### Notable changes

- Tidy up of `when` params and `assert` tasks to be more readable.
- Added feature to tweak K3S service dependencies.
- Updated documentation:
  - Node labels and component arguments
  - systemd config
  - Use alternate CNI (Calico example)

---

## 2021-01-31, v2.5.3

### Notable changes

- Bugfix, missing update to minimum ansible version var #91.

---

## 2021-01-30, v2.5.2

### Notable changes

- Bugfix, missing `k3s_start_on_boot` to control `systemd.enabled` added.

---

## 2021-01-30, v2.5.1

### Notable changes

- Added uninstall task to remove hard-linked files #88
- Fixed missing become for `systemd` operations tasks. #89
- Added `k3s_start_on_boot` to control `systemd.enabled`.

---

## 2021-01-24, v2.5.0

### Notable changes

- Added support for Ansible >= 2.9.17 #83

---

## 2021-01-23, v2.4.3

### Notable changes

- Bugfix: Installation hangs on "Check that all nodes to be ready" #84

---

## 2021-01-10, v2.4.2

### Notable changes

- Bugfix: Docker check still failing on "false"

---

## 2021-01-02, v2.4.1

### Notable changes

- Fixed issue with armv6l (Raspberry Pi Zero W)
- Added path for private repositories config to directory creation list.

---

## 2020-12-21, v2.4.0

### Notable changes

- `k3s_config_dir` derived from `k3s_config_file`, reused throughout the role
  to allow for easy removal of "Rancher" references #73.
- `k3s_token_location` has moved to be in `k3s_config_dir`.
- Tasks for creating directories now looped to capture configuration from
  `k3s_server` and `k3s_agent` and ensure directories exist before k3s
  starts, see #75.
- Server token collected directly from token file, not symlinked file
  (node-token).
- `k3s_runtime_config` defined in `vars/` for validation and overwritten in
  tasks for control plane and workers.
- Removed unused references to GitHub API.
- `set_fact` and `command` tasks now use FQCN.
- Check of `ansible_version` in environment check.
- Introduction of target environment checks for #72.
- Fixed bug with non-default listening port not being passed to workers.
- Added ability to put documentation links into validation checks #76.
- Removed the requirement for `jmespath` on the Ansible controller.
- Fixed bug with issue data collection tasks.

### Breaking changes

- Ansible minimum version is hard set to v2.10.4
- `k3s_token_location` has moved to be in `k3s_config_dir` so re-running the
  role will create a duplicate file here.

---

## 2020-12-19, v2.3.0

### Notable changes

- Updated k3s uninstall scripts #74
- Started moving Rancher references to `vars/` as per #73

---

## 2020-12-19, v2.2.2

### Notable changes

- Fixed typos in documentation.
- Molecule testing pinned to v3.1 due to tests failing.

---

## 2020-12-16, v2.2.1

### Notable changes

- Re-working documentation
- Updated GitHub link, org changed from Rancher to k3s-io.
- Replace deprecated `play_hosts` variable.

### Breaking changes

- Moving git branch from `master` to `main`.

---

## 2020-12-12, v2.2.0

### Notable changes

- Use of FQCNs enforced, minimum Ansible version now v2.10
- `k3s_etcd_datastore` no longer experimental after K3s version v1.19.5+k3s1
- Docker marked as deprecated for K3s > v1.20.0+k3s1

### Breaking changes

- Use of FQCNs enforced, minimum Ansible version now v2.10
- Use of Docker requires `k3s_use_unsupported_config` to be `true` after
  v1.20.0+k3s1

---

## 2020-12-05, v2.1.1

### Notable changes

- Fixed link to documentation.

---

## 2020-12-05, v2.1.0

### Notable changes

- Deprecated configuration check built into validation steps.
- Removed duplicated tasks for single node cluster.
- Added documentation providing quickstart examples and common operations.
- Fixed data-dir configuration.
- Some tweaks to rootless.
- Fix draining and removing of nodes.

### Breaking changes

- `k3s_token_location` now points to a file location, not a directory.
- `k3s_systemd_unit_directory` renamed to `k3s_systemd_unit_dir`
- Removed `k3s_node_data_dir` as this is now configured with `data-dir` in
  `k3s_server` and/or `k3s_agent`.

### Known issues

- Rootless is still broken, this is still not supported as a method for
  running k3s using this role.

---

## 2020-11-30, v2.0.2

### Notable changes

- Updated issue template and information collection tasks.

---

## 2020-11-30, v2.0.1

### Notable changes

- Fixed a number of typos in the README.md
- Updated the meta/main.yml to put quotes around minimum Ansible version.

---

## 2020-11-29, v2.0.0

### Notable changes

- #64 - Initial release of v2.0.0 of
  [ansible-role-k3s](https://github.com/PyratLabs/ansible-role-k3s).
- Minimum supported k3s version now: v1.19.1+k3s1
- Minimum supported Ansible version now: v2.10.0
- #62 - Remove all references to the word "master".
- #53 - Move to file-based configuration.
- Refactored to avoid duplication in code and make contribution easier.
- Validation checks moved to using variables defined in `vars/`

### Breaking changes

#### File based configuration

Issue #53

With the release of v1.19.1+k3s1, this role has moved to file-based
configuration of k3s. This requires manual translation of v1 configuration
variables into configuration file format.

Please see: https://rancher.com/docs/k3s/latest/en/installation/install-options/#configuration-file
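
For illustration, a minimal file-based configuration could look like the sketch below — the option names come from the k3s server docs and the group_vars earlier in this commit, while the values here are hypothetical:

```bash
# Hypothetical minimal /etc/rancher/k3s/config.yaml for a server node
sudo tee /etc/rancher/k3s/config.yaml >/dev/null <<'EOF'
write-kubeconfig-mode: "644"
node-ip: 192.168.9.100
disable:
  - traefik
  - servicelb
EOF
```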

#### Minimum supported k3s version

As this role now relies on file-based configuration, the v2.x release of this
role will only support v1.19+ of k3s. If you are not in a position to update
k3s you will need to continue using the v1.x release of this role, which will
be supported until March 2021<!-- 1 year after k8s v1.18 release -->.

#### Minimum supported ansible version

This role now only supports Ansible v2.10+, this is because it has moved on to
using FQCNs, with the exception of `set_fact` tasks which have
[been broken](https://github.com/ansible/ansible/issues/72319) and the fixes
have [not yet been backported to v2.10](https://github.com/ansible/ansible/pull/71824).

The use of FQCNs allows for custom modules to be introduced to override task
behavior. If this role requires a custom ansible module to be introduced then
this can be added as a dependency and targeted specifically by using the
correct FQCN.

46 server/ansible/roles/xanmanning.k3s/CONTRIBUTING.md Normal file
@@ -0,0 +1,46 @@
# Contribution Guidelines

Thank you for taking time to contribute to this Ansible role.

There are a number of ways that you can contribute to this project, not all of
them requiring you to be able to write code. Below is a list of suggested
contributions welcomed by the community:

- Submit bug reports in GitHub issues
- Comment on bug reports with further information or suggestions
- Suggest new features
- Create Pull Requests fixing bugs or adding new features
- Update and improve documentation
- Review the role on Ansible Galaxy
- Write a blog post reviewing the role
- Sponsor me.

## Issue guidelines

Issues are the best way to capture a bug in the role, or suggest new features.
This is due to issues being visible to the entire community and allows for
other contributors to pick up the work, so is a better communication medium
than email.

A good bug issue will include as much information as possible about the
environment Ansible is running in, as well as the role configuration. If there
are any relevant pieces of documentation from upstream projects, this should
be included.

New feature requests are also best captured in issues, these should include
as much relevant information as possible and if possible include a "user story"
(don't sweat if you don't know how to write one). If there are any relevant
pieces of documentation from upstream projects, this should be included.

## Pull request guidelines

PRs should only contain 1 issue fix at a time to limit the scope of testing
required. The smaller the scope of the PR, the easier it is for it to be
reviewed.

PRs should include the keyword `Fixes` before an issue number if the PR will
completely close the issue. This is because automation will close the issue
once the PR is merged.

PRs are preferred to be merged in as a single commit, so rebasing before
pushing is recommended, however this isn't a strict rule.

30 server/ansible/roles/xanmanning.k3s/LICENSE.txt Normal file
@@ -0,0 +1,30 @@
BSD 3-Clause License

Copyright (c) 2020, Xan Manning
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

344
server/ansible/roles/xanmanning.k3s/README.md
Normal file
@@ -0,0 +1,344 @@
# Ansible Role: k3s (v2.x)

Ansible role for installing [K3S](https://k3s.io/) ("Lightweight
Kubernetes") as either a standalone server or cluster.

[CI](https://github.com/PyratLabs/ansible-role-k3s/actions?query=workflow%3ACI)

## Release notes

Please see [Releases](https://github.com/PyratLabs/ansible-role-k3s/releases)
and [CHANGELOG.md](CHANGELOG.md).

## Requirements

The host you're running Ansible from requires the following Python dependencies:

- `ansible >= 2.9.17` or `ansible-base >= 2.10.4`

You can install dependencies using the requirements.txt file in this repository:
`pip3 install -r requirements.txt`.

This role has been tested against the following Linux distributions:

- Amazon Linux 2
- Archlinux
- CentOS 8
- CentOS 7
- Debian 9
- Debian 10
- Fedora 29
- Fedora 30
- Fedora 31
- Fedora 32
- openSUSE Leap 15
- Ubuntu 18.04 LTS
- Ubuntu 20.04 LTS

:warning: The v2 releases of this role only support `k3s >= v1.19`; for
`k3s < v1.19` please consider updating or use the v1.x releases of this role.

Before upgrading, see [CHANGELOG](CHANGELOG.md) for notifications of breaking
changes.

## Role Variables

Since K3s [v1.19.1+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.19.1%2Bk3s1)
you can now configure K3s using a
[configuration file](https://rancher.com/docs/k3s/latest/en/installation/install-options/#configuration-file)
rather than environment variables or command line arguments. The v2 release of
this role has moved to the configuration file method rather than populating a
systemd unit file with command-line arguments. There may be exceptions that are
defined in [Global/Cluster Variables](#globalcluster-variables), however you will
mostly be configuring k3s by configuration files using the `k3s_server` and
`k3s_agent` variables.

See "_Server (Control Plane) Configuration_" and "_Agent (Worker) Configuration_"
below.
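
For example, the following variable (a minimal sketch using an option from
the README examples further down) is written by the role into the k3s
configuration file rather than onto the command line:

```yaml
# Equivalent to passing "--disable traefik" to the k3s server binary;
# the role renders this into the file pointed at by k3s_config_file.
k3s_server:
  disable:
    - traefik
```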

### Global/Cluster Variables

Below are variables that are set against all of the play hosts for environment
consistency. These are generally cluster-level configuration.

| Variable                          | Description                                                                     | Default Value                  |
|-----------------------------------|---------------------------------------------------------------------------------|--------------------------------|
| `k3s_state`                       | State of k3s: installed, started, stopped, downloaded, uninstalled, validated.  | installed                      |
| `k3s_release_version`             | Use a specific version of k3s, eg. `v0.2.0`. Specify `false` for stable.        | `false`                        |
| `k3s_config_file`                 | Location of the k3s configuration file.                                         | `/etc/rancher/k3s/config.yaml` |
| `k3s_build_cluster`               | When multiple play hosts are available, attempt to cluster. Read notes below.   | `true`                         |
| `k3s_registration_address`        | Fixed registration address for nodes. IP or FQDN.                               | NULL                           |
| `k3s_github_url`                  | Set the GitHub URL to install k3s from.                                         | https://github.com/k3s-io/k3s  |
| `k3s_install_dir`                 | Installation directory for k3s.                                                 | `/usr/local/bin`               |
| `k3s_install_hard_links`          | Install using hard links rather than symbolic links.                            | []                             |
| `k3s_server_manifests_templates`  | A list of Auto-Deploying Manifests Templates.                                   | []                             |
| `k3s_use_experimental`            | Allow the use of experimental features in k3s.                                  | `false`                        |
| `k3s_use_unsupported_config`      | Allow the use of unsupported configurations in k3s.                             | `false`                        |
| `k3s_etcd_datastore`              | Enable etcd embedded datastore (read notes below).                              | `false`                        |
| `k3s_debug`                       | Enable debug logging on the k3s service.                                        | `false`                        |
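
As a sketch, assuming a `group_vars/` layout (the values here are
illustrative, not recommendations), these would typically be set once for
every node:

```yaml
# group_vars/k3s_cluster.yml -- pin the release and enable hard links so the
# system-upgrade-controller can follow the installed binaries.
k3s_release_version: v1.19.3+k3s1
k3s_install_hard_links: true
```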

### K3S Service Configuration

The below variables change how and when the systemd service unit file for K3S
is run. Use this with caution; please refer to the [systemd documentation](https://www.freedesktop.org/software/systemd/man/systemd.unit.html#%5BUnit%5D%20Section%20Options)
for more information.

| Variable                | Description                                                       | Default Value |
|-------------------------|-------------------------------------------------------------------|---------------|
| `k3s_start_on_boot`     | Start k3s on boot.                                                | `true`        |
| `k3s_service_requires`  | List of systemd units required by the k3s service unit.          | []            |
| `k3s_service_wants`     | List of "wanted" systemd units for k3s (weaker than "requires"). | []\*          |
| `k3s_service_before`    | Start k3s before a defined list of systemd units.                | []            |
| `k3s_service_after`     | Start k3s after a defined list of systemd units.                 | []\*          |

\* The systemd unit template **always** specifies `network-online.target` for
`wants` and `after`.

### Group/Host Variables

Below are variables that are set against individual or groups of play hosts.
Typically you'd set these at group level for the control plane or worker nodes.

| Variable            | Description                                                      | Default Value                                      |
|---------------------|------------------------------------------------------------------|----------------------------------------------------|
| `k3s_control_node`  | Specify if a host (or host group) is part of the control plane. | `false` (role will automatically delegate a node)  |
| `k3s_server`        | Server (control plane) configuration, see notes below.          | `{}`                                               |
| `k3s_agent`         | Agent (worker) configuration, see notes below.                  | `{}`                                               |
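
For instance, a single host can be pinned to the control plane with one
variable (a sketch; the `host_vars/` filename is an assumption about your
inventory layout):

```yaml
# host_vars/kube-0.yml
k3s_control_node: true
```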

#### Server (Control Plane) Configuration

The control plane is configured with the `k3s_server` dict variable. Please
refer to the below documentation for configuration options:

https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/

The `k3s_server` dictionary variable will contain flags from the above
(removing the `--` prefix). Below is an example:

```yaml
k3s_server:
  datastore-endpoint: postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable
  docker: true
  cluster-cidr: 172.20.0.0/16
  flannel-backend: 'none'  # This needs to be in quotes
  disable:
    - traefik
    - coredns
```

Alternatively, you can create a .yaml file and read it in to the `k3s_server`
variable as per the below example:

```yaml
k3s_server: "{{ lookup('file', 'path/to/k3s_server.yml') | from_yaml }}"
```

Check out the [Documentation](documentation/README.md) for example
configuration.

#### Agent (Worker) Configuration

Workers are configured with the `k3s_agent` dict variable. Please refer to the
below documentation for configuration options:

https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config

The `k3s_agent` dictionary variable will contain flags from the above
(removing the `--` prefix). Below is an example:

```yaml
k3s_agent:
  with-node-id: true
  node-label:
    - "foo=bar"
    - "hello=world"
```

Alternatively, you can create a .yaml file and read it in to the `k3s_agent`
variable as per the below example:

```yaml
k3s_agent: "{{ lookup('file', 'path/to/k3s_agent.yml') | from_yaml }}"
```

Check out the [Documentation](documentation/README.md) for example
configuration.

### Ansible Controller Configuration Variables

The below variables are used to change the way the role executes in Ansible,
particularly with regards to privilege escalation.

| Variable                             | Description                                                          | Default Value |
|--------------------------------------|----------------------------------------------------------------------|---------------|
| `k3s_skip_validation`                | Skip all tasks that validate configuration.                          | `false`       |
| `k3s_skip_env_checks`                | Skip all tasks that check environment configuration.                 | `false`       |
| `k3s_become_for_all`                 | Escalate user privileges for all tasks. Overrides all of the below.  | `false`       |
| `k3s_become_for_systemd`             | Escalate user privileges for systemd tasks.                          | NULL          |
| `k3s_become_for_install_dir`         | Escalate user privileges for creating installation directories.      | NULL          |
| `k3s_become_for_directory_creation`  | Escalate user privileges for creating application directories.       | NULL          |
| `k3s_become_for_usr_local_bin`       | Escalate user privileges for writing to `/usr/local/bin`.            | NULL          |
| `k3s_become_for_package_install`     | Escalate user privileges for installing k3s.                         | NULL          |
| `k3s_become_for_kubectl`             | Escalate user privileges for running `kubectl`.                      | NULL          |
| `k3s_become_for_uninstall`           | Escalate user privileges for uninstalling k3s.                       | NULL          |

#### Important note about `k3s_release_version`

If you do not set a `k3s_release_version`, the latest version from the stable
channel of k3s will be installed. If you are developing against a specific
version of k3s you must ensure this is set in your Ansible configuration, eg:

```yaml
k3s_release_version: v1.19.3+k3s1
```

It is also possible to install specific K3s "channels"; below are some
examples for `k3s_release_version`:

```yaml
k3s_release_version: false             # defaults to 'stable' channel
k3s_release_version: stable            # latest 'stable' release
k3s_release_version: testing           # latest 'testing' release
k3s_release_version: v1.19             # latest 'v1.19' release
k3s_release_version: v1.19.3+k3s3      # specific release

# Specific commit
# CAUTION - only used for testing - must be 40 characters
k3s_release_version: 48ed47c4a3e420fa71c18b2ec97f13dc0659778b
```

#### Important note about `k3s_install_hard_links`

If you are using the [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller)
you will need to use hard links rather than symbolic links as the controller
will not be able to follow symbolic links. This option has been added, however
it is not enabled by default to avoid breaking existing installations.

To enable the use of hard links, ensure `k3s_install_hard_links` is set
to `true`.

```yaml
k3s_install_hard_links: true
```

The result of this can be seen by running the following in `k3s_install_dir`:

`ls -larthi | grep -E 'k3s|ctr|ctl' | grep -vE ".sh$" | sort`

Symbolic links:

```text
[root@node1 bin]# ls -larthi | grep -E 'k3s|ctr|ctl' | grep -vE ".sh$" | sort
3277823 -rwxr-xr-x 1 root root  52M Jul 25 12:50 k3s-v1.18.4+k3s1
3279565 lrwxrwxrwx 1 root root   31 Jul 25 12:52 k3s -> /usr/local/bin/k3s-v1.18.6+k3s1
3279644 -rwxr-xr-x 1 root root  51M Jul 25 12:52 k3s-v1.18.6+k3s1
3280079 lrwxrwxrwx 1 root root   31 Jul 25 12:52 ctr -> /usr/local/bin/k3s-v1.18.6+k3s1
3280080 lrwxrwxrwx 1 root root   31 Jul 25 12:52 crictl -> /usr/local/bin/k3s-v1.18.6+k3s1
3280081 lrwxrwxrwx 1 root root   31 Jul 25 12:52 kubectl -> /usr/local/bin/k3s-v1.18.6+k3s1
```

Hard links:

```text
[root@node1 bin]# ls -larthi | grep -E 'k3s|ctr|ctl' | grep -vE ".sh$" | sort
3277823 -rwxr-xr-x 1 root root  52M Jul 25 12:50 k3s-v1.18.4+k3s1
3279644 -rwxr-xr-x 5 root root  51M Jul 25 12:52 crictl
3279644 -rwxr-xr-x 5 root root  51M Jul 25 12:52 ctr
3279644 -rwxr-xr-x 5 root root  51M Jul 25 12:52 k3s
3279644 -rwxr-xr-x 5 root root  51M Jul 25 12:52 k3s-v1.18.6+k3s1
3279644 -rwxr-xr-x 5 root root  51M Jul 25 12:52 kubectl
```

#### Important note about `k3s_build_cluster`

If you set `k3s_build_cluster` to `false`, this role will install each play
host as a standalone node. An example of when you might use this would be
when building a large number of standalone IoT devices running K3s. Below is a
hypothetical situation where we are to deploy 25 Raspberry Pi devices, each a
standalone system and not a cluster of 25 nodes. To do this we'd use a playbook
similar to the below:

```yaml
- hosts: k3s_nodes  # eg. 25 RPis defined in our inventory.
  vars:
    k3s_build_cluster: false
  roles:
    - xanmanning.k3s
```

#### Important note about `k3s_control_node` and High Availability (HA)

By default only one host will be defined as a control node by Ansible. If you
do not set a host as a control node, this role will automatically delegate
the first play host as a control node. This is not suitable for use within
a production workload.

If multiple hosts have `k3s_control_node` set to `true`, you must also set
`datastore-endpoint` in `k3s_server` as the connection string to a MySQL or
PostgreSQL database, or to an external etcd cluster, else the play will fail.

If using TLS, the CA, certificate and key need to already be available on
the play hosts.

See: [High Availability with an External DB](https://rancher.com/docs/k3s/latest/en/installation/ha/)

It is also possible, though not supported, to run a single K3s control node
with a `datastore-endpoint` defined. As this is not a typically supported
configuration you will need to set `k3s_use_unsupported_config` to `true`.

Since K3s v1.19.1 it is possible to use an embedded etcd as the backend
database, and this is done by setting `k3s_etcd_datastore` to `true`.
The best practice for etcd is to define at least 3 members to ensure quorum is
established. In addition to this, an odd number of members is recommended to
ensure a majority in the event of a network partition. If you want to use 2
members or an even number of members, please set `k3s_use_unsupported_config`
to `true`.
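
Putting this together, a minimal sketch of the variables for a three-host
embedded etcd control plane (per-host assignment of `k3s_control_node` is
shown in the HA quickstart later in the documentation):

```yaml
# Applied to all three control plane hosts.
k3s_etcd_datastore: true
k3s_control_node: true
k3s_use_experimental: true  # required for k3s < v1.19.5+k3s1
```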

## Dependencies

No dependencies on other roles.

## Example Playbooks

Example playbook, single control node running `testing` channel k3s:

```yaml
- hosts: k3s_nodes
  roles:
    - { role: xanmanning.k3s, k3s_release_version: testing }
```

Example playbook, highly available with a PostgreSQL database, running the
latest stable release:

```yaml
- hosts: k3s_nodes
  vars:
    k3s_registration_address: loadbalancer  # Typically a load balancer.
    k3s_server:
      datastore-endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable"
  pre_tasks:
    - name: Set each node to be a control node
      ansible.builtin.set_fact:
        k3s_control_node: true
      when: inventory_hostname in ['node2', 'node3']
  roles:
    - role: xanmanning.k3s
```

## License

[BSD 3-clause](LICENSE.txt)

## Contributors

Contributions from the community are very welcome, but please read the
[contribution guidelines](CONTRIBUTING.md) before doing so; this will help
make things as streamlined as possible.

Also, please check out the awesome
[list of contributors](https://github.com/PyratLabs/ansible-role-k3s/graphs/contributors).

## Author Information

[Xan Manning](https://xan.manning.io/)
99
server/ansible/roles/xanmanning.k3s/defaults/main.yml
Normal file
@@ -0,0 +1,99 @@
---

##
# Global/Cluster Configuration
##

# k3s state, options: installed, started, stopped, restarted, uninstalled, validated
# (default: installed)
k3s_state: installed

# Use a specific k3s version, if set to "false" we will get the latest
# k3s_release_version: v1.19.3
k3s_release_version: false

# Location of the k3s configuration file
k3s_config_file: /etc/rancher/k3s/config.yaml

# When multiple ansible_play_hosts_all are present, attempt to cluster the nodes.
# Using false will create multiple standalone nodes.
# (default: true)
k3s_build_cluster: true

# URL for GitHub project
k3s_github_url: https://github.com/k3s-io/k3s

# Skip all tasks that validate configuration
k3s_skip_validation: false

# Skip all tasks that check environment configuration
k3s_skip_env_checks: false

# Installation directory for k3s
k3s_install_dir: /usr/local/bin

# Install using hard links rather than symbolic links
k3s_install_hard_links: false

# A list of templates used to preconfigure the cluster.
k3s_server_manifests_templates: []

# Use experimental features in k3s?
k3s_use_experimental: false

# Allow for unsupported configurations in k3s?
k3s_use_unsupported_config: false

# Enable etcd embedded datastore
k3s_etcd_datastore: false

##
# Systemd config
##

# Start k3s on system boot
k3s_start_on_boot: true

# List of systemd units required by the k3s service unit.
k3s_service_requires: []

# List of "wanted" systemd units for k3s (weaker than "requires").
k3s_service_wants: []

# Start k3s before a defined list of systemd units.
k3s_service_before: []

# Start k3s after a defined list of systemd units.
k3s_service_after: []

##
# Server Configuration
##

k3s_server: {}
# k3s_server:
#   listen-port: 6443

##
# Agent Configuration
##

k3s_agent: {}
# k3s_agent:
#   node-label:
#     - "foo=bar"
#     - "bish=bosh"

##
# Ansible Controller configuration
##

# Use become privileges for:
k3s_become_for_all: false
k3s_become_for_systemd: null
k3s_become_for_install_dir: null
k3s_become_for_directory_creation: null
k3s_become_for_usr_local_bin: null
k3s_become_for_package_install: null
k3s_become_for_kubectl: null
k3s_become_for_uninstall: null
43
server/ansible/roles/xanmanning.k3s/documentation/README.md
Normal file
@@ -0,0 +1,43 @@
# ansible-role-k3s

This document describes a number of ways of consuming this Ansible role for use
in your own k3s deployments. It will not be able to cover every use case
scenario but will provide some common example configurations.

## Requirements

Before you start you will need an Ansible controller. This can either be your
workstation, or a dedicated system that you have access to. The instructions
in this documentation assume you are using the `ansible` CLI; there are no
instructions available for Ansible Tower at this time.

Follow the below guide to get Ansible installed:

https://docs.ansible.com/ansible/latest/installation_guide/index.html

## Quickstart

Below are quickstart examples for a single node k3s server, a k3s cluster
with a single control node, and an HA k3s cluster. These represent the bare
minimum configuration.

- [Single node k3s](quickstart-single-node.md)
- [Simple k3s cluster](quickstart-cluster.md)
- [HA k3s cluster using embedded etcd](quickstart-ha-cluster.md)

## Example configurations and operations

### Configuration

- [Setting up 2-node HA control plane with external datastore](configuration/2-node-ha-ext-datastore.md)
- [Provision multiple standalone k3s nodes](configuration/multiple-standalone-k3s-nodes.md)
- [Set node labels and component arguments](configuration/node-labels-and-component-args.md)
- [Use an alternate CNI](configuration/use-an-alternate-cni.md)
- [Start K3S after another service](configuration/systemd-config.md)

### Operations

- [Stop/Start a cluster](operations/stop-start-cluster.md)
- [Updating k3s](operations/updating-k3s.md)
- [Extending a cluster](operations/extending-a-cluster.md)
- [Shrinking a cluster](operations/shrinking-a-cluster.md)
79
server/ansible/roles/xanmanning.k3s/documentation/configuration/2-node-ha-ext-datastore.md
Normal file
@@ -0,0 +1,79 @@
# 2 Node HA Control Plane with external database

For this configuration we are deploying a highly available control plane
composed of two control nodes. This can be achieved with embedded etcd, however
etcd ideally has an odd number of nodes.

The example below will use an external PostgreSQL datastore to store the
cluster state information.

Main guide: https://rancher.com/docs/k3s/latest/en/installation/ha/

## Architecture

```text
                      +---------------+
                      | Load Balancer |
                      +-------+-------+
                              |
                              |
                              |
                              |
            +------------+    |    +------------+
            |            |    |    |            |
   +--------+ control-01 +<---+--->+ control-02 |
   |        |            |         |            |
   |        +-----+------+         +------+-----+
   |              |                       |
   |              +-----------+-----------+
   |              |           |           |
   |        +-----v-----+ +---v-------+ +-v---------+
   |        |           | |           | |           |
   |        | worker-01 | | worker-02 | | worker-03 |
   |        |           | |           | |           |
   |        +-----------+ +-----------+ +-----------+
   |
   |        +-------+      +-------+
   |        |       |      |       |
   +------->+ db-01 +------+ db-02 |
            |       |      |       |
            +-------+      +-------+
```

### Required Components

- Load balancer
- 2 control plane nodes
- 1 or more worker nodes
- PostgreSQL database (replicated, or a Linux HA cluster).

## Configuration

For your control nodes, you will need to instruct the control plane of the
PostgreSQL datastore endpoint and set `k3s_control_node_address` to be the
hostname or IP of your load balancer.

Below is the example for PostgreSQL; it is possible to use MySQL or an etcd
cluster as well. Consult the below guide for using alternative datastore
endpoints:

https://rancher.com/docs/k3s/latest/en/installation/datastore/#datastore-endpoint-format-and-functionality

```yaml
---

k3s_server:
  datastore-endpoint: postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable
  node-taint:
    - "k3s-controlplane=true:NoExecute"
```

Your worker nodes need to know how to connect to the control plane; this is
defined by setting `k3s_control_node_address` to the hostname or IP address of
the load balancer.

```yaml
---

k3s_control_node_address: control.examplek3s.com
```
71
server/ansible/roles/xanmanning.k3s/documentation/configuration/multiple-standalone-k3s-nodes.md
Normal file
@@ -0,0 +1,71 @@
# Multiple standalone K3s nodes

This is an example of when you might want to configure multiple standalone
k3s nodes simultaneously. For this we will assume a hypothetical situation
where we are configuring 25 Raspberry Pis to deploy to our shop floors.

Each Raspberry Pi will be configured as a standalone IoT device hosting an
application that will push data to head office.

## Architecture

```text
+-------------+
|             |
|   Node-01   +-+
|             | |
+--+----------+ +-+
   |            | |
   +--+---------+ +-+
      |            |
      +--+---------+
         |  Node-N |
         +---------+
```

## Configuration

Below is our example inventory of 200 nodes (truncated):

```yaml
---

k3s_workers:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
    kube-1:
      ansible_user: ansible
      ansible_host: 10.10.9.3
      ansible_python_interpreter: /usr/bin/python3
    kube-2:
      ansible_user: ansible
      ansible_host: 10.10.9.4
      ansible_python_interpreter: /usr/bin/python3

    # ..... SNIP .....

    kube-199:
      ansible_user: ansible
      ansible_host: 10.10.9.201
      ansible_python_interpreter: /usr/bin/python3
    kube-200:
      ansible_user: ansible
      ansible_host: 10.10.9.202
      ansible_python_interpreter: /usr/bin/python3
```

In our `group_vars/` (or as `vars:` in our playbook), we will need to set the
`k3s_build_cluster` variable to `false`. This will stop the role from
attempting to cluster all 200 nodes; instead it will install k3s across each
node as 200 standalone servers.

```yaml
---

k3s_build_cluster: false
```
39
server/ansible/roles/xanmanning.k3s/documentation/configuration/node-labels-and-component-args.md
Normal file
@@ -0,0 +1,39 @@
# Configure node labels and component arguments

The following command line arguments can be specified multiple times with
`key=value` pairs:

- `--kube-kubelet-arg`
- `--kube-proxy-arg`
- `--kube-apiserver-arg`
- `--kube-scheduler-arg`
- `--kube-controller-manager-arg`
- `--kube-cloud-controller-manager-arg`
- `--node-label`
- `--node-taint`

In the config file, this is done by defining a list of values for each
command-line argument, for example:

```yaml
---

k3s_server:
  # Set the plugins registry directory
  kubelet-arg:
    - "volume-plugin-dir=/var/lib/rancher/k3s/agent/kubelet/plugins_registry"
  # Set the pod eviction timeout and node monitor grace period
  kube-controller-manager-arg:
    - "pod-eviction-timeout=2m"
    - "node-monitor-grace-period=30s"
  # Set API server feature gate
  kube-apiserver-arg:
    - "feature-gates=RemoveSelfLink=false"
  # Labels to apply to a node
  node-label:
    - "NodeTier=development"
    - "NodeLocation=eu-west-2a"
  # Stop the k3s control plane having workloads scheduled on it
  node-taint:
    - "k3s-controlplane=true:NoExecute"
```
19
server/ansible/roles/xanmanning.k3s/documentation/configuration/systemd-config.md
Normal file
@@ -0,0 +1,19 @@
# systemd config

Below are examples to tweak how and when K3S starts up.

## Wanted service units

In this example, we're going to start K3S after Wireguard. Our example server
has a Wireguard connection `wg0`. We are using "wants" rather than "requires"
as it places a weaker requirement on Wireguard being up. We then want
K3S to start after Wireguard has started.

```yaml
---

k3s_service_wants:
  - wg-quick@wg0.service
k3s_service_after:
  - wg-quick@wg0.service
```
63
server/ansible/roles/xanmanning.k3s/documentation/configuration/use-an-alternate-cni.md
Normal file
@@ -0,0 +1,63 @@
# Use an alternate CNI

K3S ships with Flannel, however sometimes you want a different CNI such as
Calico, Canal or Weave Net. To do this you will need to disable Flannel with
`flannel-backend: "none"`, specify a `cluster-cidr` and add your CNI manifests
to the `k3s_server_manifests_templates`.

## Calico example

The below is based on the
[Calico quickstart documentation](https://docs.projectcalico.org/getting-started/kubernetes/quickstart).

Steps:

1. Download `tigera-operator.yaml` to the manifests directory (see the
   download sketch after this list).
1. Download `custom-resources.yaml` to the manifests directory.
1. Choose a `cluster-cidr` (we are using 192.168.0.0/16).
1. Set `k3s_server` and `k3s_server_manifests_templates` as per the below,
   ensuring the paths to manifests are correct for your project repo.
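
One way to fetch the manifests is with an Ansible task on your controller
(a sketch only; the `manifests/calico/` destination is an assumption about
your repository layout):

```yaml
# Hypothetical pre-task: download the Calico manifests into the
# manifests/calico/ directory referenced in the config below.
- name: Download Calico manifests
  ansible.builtin.get_url:
    url: "{{ item }}"
    dest: "manifests/calico/"
    mode: "0644"
  loop:
    - https://docs.projectcalico.org/manifests/tigera-operator.yaml
    - https://docs.projectcalico.org/manifests/custom-resources.yaml
  delegate_to: localhost  # fetch on the Ansible controller, not the nodes
```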

```yaml
---

# K3S Server config: don't deploy flannel and set cluster pod CIDR.
k3s_server:
  cluster-cidr: 192.168.0.0/16
  flannel-backend: "none"

# Deploy the following k3s server templates.
k3s_server_manifests_templates:
  - "manifests/calico/tigera-operator.yaml"
  - "manifests/calico/custom-resources.yaml"
```

All nodes should come up as "Ready"; below is a 3-node cluster:

```text
$ kubectl get nodes -o wide -w
NAME     STATUS   ROLES                       AGE    VERSION        INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION     CONTAINER-RUNTIME
kube-0   Ready    control-plane,etcd,master   114s   v1.20.2+k3s1   10.10.9.2     10.10.9.2     Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.3-k3s1
kube-1   Ready    control-plane,etcd,master   80s    v1.20.2+k3s1   10.10.9.3     10.10.9.3     Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.3-k3s1
kube-2   Ready    control-plane,etcd,master   73s    v1.20.2+k3s1   10.10.9.4     10.10.9.4     Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.3-k3s1
```

Pods should be deployed within the CIDR specified in our config file.

```text
$ kubectl get pods -o wide -A
NAMESPACE         NAME                                      READY   STATUS      RESTARTS   AGE     IP               NODE     NOMINATED NODE   READINESS GATES
calico-system     calico-kube-controllers-cfb4ff54b-8rp8r   1/1     Running     0          5m4s    192.168.145.65   kube-0   <none>           <none>
calico-system     calico-node-2cm2m                         1/1     Running     0          5m4s    10.10.9.2        kube-0   <none>           <none>
calico-system     calico-node-2s6lx                         1/1     Running     0          4m42s   10.10.9.4        kube-2   <none>           <none>
calico-system     calico-node-zwqjz                         1/1     Running     0          4m49s   10.10.9.3        kube-1   <none>           <none>
calico-system     calico-typha-7b6747d665-78swq             1/1     Running     0          3m5s    10.10.9.4        kube-2   <none>           <none>
calico-system     calico-typha-7b6747d665-8ff66             1/1     Running     0          3m5s    10.10.9.3        kube-1   <none>           <none>
calico-system     calico-typha-7b6747d665-hgplx             1/1     Running     0          5m5s    10.10.9.2        kube-0   <none>           <none>
kube-system       coredns-854c77959c-6qhgt                  1/1     Running     0          5m20s   192.168.145.66   kube-0   <none>           <none>
kube-system       helm-install-traefik-4czr9                0/1     Completed   0          5m20s   192.168.145.67   kube-0   <none>           <none>
kube-system       metrics-server-86cbb8457f-qcxf5           1/1     Running     0          5m20s   192.168.145.68   kube-0   <none>           <none>
kube-system       traefik-6f9cbd9bd4-7h4rl                  1/1     Running     0          2m50s   192.168.126.65   kube-1   <none>           <none>
tigera-operator   tigera-operator-b6c4bfdd9-29hhr           1/1     Running     0          5m20s   10.10.9.2        kube-0   <none>           <none>
```
69
server/ansible/roles/xanmanning.k3s/documentation/operations/extending-a-cluster.md
Normal file
@@ -0,0 +1,69 @@
# Extending a cluster

This document describes the method for extending a cluster with new worker
nodes.

## Assumptions

It is assumed that you have already deployed a k3s cluster using this role,
and that you have an appropriately configured inventory and playbook to create
the cluster.

Below, our example inventory and playbook are as follows:

- inventory: `inventory.yml`
- playbook: `cluster.yml`

Currently your `inventory.yml` looks like this; it has two nodes defined,
`kube-0` (control node) and `kube-1` (worker node).

```yaml
---

k3s_cluster:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
    kube-1:
      ansible_user: ansible
      ansible_host: 10.10.9.3
      ansible_python_interpreter: /usr/bin/python3
```

## Method

We have our two nodes: one control, one worker. The goal is to extend this to
add capacity by adding a new worker node, `kube-2`. To do this we will add the
new node to our inventory.

```yaml
---

k3s_cluster:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
    kube-1:
      ansible_user: ansible
      ansible_host: 10.10.9.3
      ansible_python_interpreter: /usr/bin/python3
    kube-2:
      ansible_user: ansible
      ansible_host: 10.10.9.4
      ansible_python_interpreter: /usr/bin/python3
```

Once the new node has been added, you can re-run the automation to join it to
the cluster. You should expect the majority of changes to apply to the worker
node being introduced to the cluster.
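
To apply the change, run the playbook again exactly as for the initial
deployment, for example `ansible-playbook -i inventory.yml cluster.yml`.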

```text
PLAY RECAP *******************************************************************************************************
kube-0                     : ok=53   changed=1    unreachable=0    failed=0    skipped=30   rescued=0    ignored=0
kube-1                     : ok=40   changed=1    unreachable=0    failed=0    skipped=35   rescued=0    ignored=0
kube-2                     : ok=42   changed=10   unreachable=0    failed=0    skipped=35   rescued=0    ignored=0
```
74
server/ansible/roles/xanmanning.k3s/documentation/operations/shrinking-a-cluster.md
Normal file
@@ -0,0 +1,74 @@
# Shrinking a cluster

This document describes the method for shrinking a cluster by removing a
worker node.

## Assumptions

It is assumed that you have already deployed a k3s cluster using this role,
and that you have an appropriately configured inventory and playbook to create
the cluster.

Below, our example inventory and playbook are as follows:

- inventory: `inventory.yml`
- playbook: `cluster.yml`

Currently your `inventory.yml` looks like this; it has three nodes defined,
`kube-0` (control node) and `kube-1`, `kube-2` (worker nodes).

```yaml
---

k3s_cluster:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
    kube-1:
      ansible_user: ansible
      ansible_host: 10.10.9.3
      ansible_python_interpreter: /usr/bin/python3
    kube-2:
      ansible_user: ansible
      ansible_host: 10.10.9.4
      ansible_python_interpreter: /usr/bin/python3
```

## Method

We have our three nodes: one control, two workers. The goal is to shrink this
to remove excess capacity by offboarding the worker node `kube-2`. To do this
we will set the `kube-2` node to `k3s_state: uninstalled` in our inventory.

```yaml
---

k3s_cluster:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
    kube-1:
      ansible_user: ansible
      ansible_host: 10.10.9.3
      ansible_python_interpreter: /usr/bin/python3
    kube-2:
      ansible_user: ansible
      ansible_host: 10.10.9.4
      ansible_python_interpreter: /usr/bin/python3
      k3s_state: uninstalled
```

What you will typically see is changes to your control plane (`kube-0`) and the
node being removed (`kube-2`). The role will register the removal of the node
with the cluster by draining the node and removing it from the cluster.

```text
PLAY RECAP *******************************************************************************************************
kube-0                     : ok=55   changed=2    unreachable=0    failed=0    skipped=28   rescued=0    ignored=0
kube-1                     : ok=40   changed=0    unreachable=0    failed=0    skipped=35   rescued=0    ignored=0
kube-2                     : ok=23   changed=2    unreachable=0    failed=0    skipped=17   rescued=0    ignored=1
```
93
server/ansible/roles/xanmanning.k3s/documentation/operations/stop-start-cluster.md
Normal file
@@ -0,0 +1,93 @@
# Stopping and Starting a cluster

This document describes the Ansible method for restarting a k3s cluster
deployed by this role.

## Assumptions

It is assumed that you have already deployed a k3s cluster using this role,
and that you have an appropriately configured inventory and playbook to create
the cluster.

Below, our example inventory and playbook are as follows:

- inventory: `inventory.yml`
- playbook: `cluster.yml`

## Method

### Start cluster

You can start the cluster using either of the following commands:

- Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=started'`
- Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=started' --become all`

Below is example output; remember that Ansible is idempotent, so re-running a
command may not necessarily change the state.

**Playbook method output**:

```text
PLAY RECAP *******************************************************************************************************
kube-0                     : ok=6    changed=0    unreachable=0    failed=0    skipped=2    rescued=0    ignored=0
kube-1                     : ok=6    changed=0    unreachable=0    failed=0    skipped=2    rescued=0    ignored=0
kube-2                     : ok=6    changed=0    unreachable=0    failed=0    skipped=2    rescued=0    ignored=0
```

### Stop cluster

You can stop the cluster using either of the following commands:

- Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=stopped'`
- Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=stopped' --become all`

Below is example output; remember that Ansible is idempotent, so re-running a
command may not necessarily change the state.

**Playbook method output**:

```text
PLAY RECAP *******************************************************************************************************
kube-0                     : ok=6    changed=1    unreachable=0    failed=0    skipped=2    rescued=0    ignored=0
kube-1                     : ok=6    changed=1    unreachable=0    failed=0    skipped=2    rescued=0    ignored=0
kube-2                     : ok=6    changed=1    unreachable=0    failed=0    skipped=2    rescued=0    ignored=0
```

### Restart cluster

Just like the `service` module, you can also specify `restarted` as a state.
This will do a `stop` followed by a `start`.

- Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=restarted'`
- Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=restarted' --become all`

```text
PLAY RECAP *******************************************************************************************************
kube-0                     : ok=7    changed=1    unreachable=0    failed=0    skipped=3    rescued=0    ignored=0
kube-1                     : ok=7    changed=1    unreachable=0    failed=0    skipped=3    rescued=0    ignored=0
kube-2                     : ok=7    changed=1    unreachable=0    failed=0    skipped=3    rescued=0    ignored=0
```

## Tips

You can limit the targets by adding the `-l` flag to your `ansible-playbook`
command, or simply target your ad-hoc commands. For example, in a 3 node
cluster (called `kube-0`, `kube-1` and `kube-2`) we can limit the restart to
`kube-1` and `kube-2` with the following:

- Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=restarted' -l "kube-1,kube-2"`
- Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=restarted' --become "kube-1,kube-2"`

```text
PLAY RECAP ********************************************************************************************************
kube-1                     : ok=7    changed=2    unreachable=0    failed=0    skipped=3    rescued=0    ignored=0
kube-2                     : ok=7    changed=2    unreachable=0    failed=0    skipped=3    rescued=0    ignored=0
```

## FAQ

1. _Why might I use the `ansible-playbook` command over an ad-hoc command?_
   - The stop/start tasks will be aware of configuration. As the role
     develops, there might be some pre-tasks added to change how a cluster
     is stopped or started.
52
server/ansible/roles/xanmanning.k3s/documentation/operations/updating-k3s.md
Normal file
@@ -0,0 +1,52 @@
# Updating k3s

## Before you start!

Ensure you back up your k3s cluster. This is particularly important if you use
an external datastore or embedded etcd. Please refer to the below guide for
backing up your k3s datastore:

https://rancher.com/docs/k3s/latest/en/backup-restore/

Also check that your volume backups are working!

## Procedure

### Updates using Ansible

To update via Ansible, set `k3s_release_version` to the target version you wish
to go to. For example, from your `v1.19.3+k3s1` playbook:

```yaml
---
# BEFORE

- name: Provision k3s cluster
  hosts: k3s_cluster
  roles:
    - name: xanmanning.k3s
  vars:
    k3s_release_version: v1.19.3+k3s1
```

Updating to `v1.20.2+k3s1`:

```yaml
---
# AFTER

- name: Provision k3s cluster
  hosts: k3s_cluster
  roles:
    - name: xanmanning.k3s
  vars:
    k3s_release_version: v1.20.2+k3s1
```
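
Re-running your playbook against your inventory should then upgrade each node
to the new release in place.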

### Automatic updates

For automatic updates, consider installing Rancher's
[system-upgrade-controller](https://rancher.com/docs/k3s/latest/en/upgrades/automated/).

**Please note**, to be able to update using the system-upgrade-controller you
will need to set `k3s_install_hard_links` to `true`.
147
server/ansible/roles/xanmanning.k3s/documentation/quickstart-cluster.md
Normal file
@@ -0,0 +1,147 @@
# Quickstart: K3s cluster with a single control node

This is the quickstart guide to creating your own k3s cluster with one control
plane node. This control plane node will also be a worker.

:hand: This example requires your Ansible user to be able to connect to the
servers over SSH using key-based authentication. The user also has an entry
in a sudoers file that allows privilege escalation without requiring a
password.

To test this is the case, run the following check, replacing `<ansible_user>`
and `<server_name>`. The expected output is `Works`.

`ssh <ansible_user>@<server_name> 'sudo cat /etc/shadow >/dev/null && echo "Works"'`

For example:

```text
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"'
Works
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $
```

## Directory structure

Our working directory will have the following files:

```text
kubernetes-playground/
  |_ inventory.yml
  |_ cluster.yml
```

## Inventory

Here's a YAML based example inventory for our servers called `inventory.yml`:

```yaml
---

k3s_cluster:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
    kube-1:
      ansible_user: ansible
      ansible_host: 10.10.9.3
      ansible_python_interpreter: /usr/bin/python3
    kube-2:
      ansible_user: ansible
      ansible_host: 10.10.9.4
      ansible_python_interpreter: /usr/bin/python3
```

We can test this works with `ansible -i inventory.yml -m ping all`, expected
result:

```text
kube-0 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
kube-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
kube-2 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
```

## Playbook

Here is our playbook for the k3s cluster (`cluster.yml`):

```yaml
---

- name: Build a cluster with a single control node
  hosts: k3s_cluster
  vars:
    k3s_become_for_all: true
  roles:
    - role: xanmanning.k3s
```

## Execution

To execute the playbook against our inventory file, we will run the following
command:

`ansible-playbook -i inventory.yml cluster.yml`

The output we can expect is similar to the below, with no failed or unreachable
nodes. The default behavior of this role is to delegate the first play host as
the control node, so kube-0 will have more changed tasks than others:

```text
PLAY RECAP *******************************************************************************************************
kube-0                     : ok=56   changed=11   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0
kube-1                     : ok=43   changed=10   unreachable=0    failed=0    skipped=32   rescued=0    ignored=0
kube-2                     : ok=43   changed=10   unreachable=0    failed=0    skipped=32   rescued=0    ignored=0
```

## Testing

After logging into kube-0, we can test that k3s is running across the cluster,
that all nodes are ready and that everything is ready to execute our Kubernetes
workloads by running the following:

- `sudo kubectl get nodes -o wide`
- `sudo kubectl get pods -o wide --all-namespaces`

:hand: Note we are using `sudo` because we need to be root to access the
kube config for this node. This behavior can be changed by specifying
`write-kubeconfig-mode: 0644` in `k3s_server`.
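
A minimal sketch of that setting (note that mode `0644` makes the kubeconfig
readable by all local users, so consider whether that is acceptable for your
environment):

```yaml
k3s_server:
  write-kubeconfig-mode: "0644"
```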

**Get Nodes**:

```text
ansible@kube-0:~$ sudo kubectl get nodes -o wide
NAME     STATUS   ROLES    AGE   VERSION        INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION     CONTAINER-RUNTIME
kube-0   Ready    master   34s   v1.19.4+k3s1   10.0.2.15     <none>        Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.1-k3s1
kube-2   Ready    <none>   14s   v1.19.4+k3s1   10.0.2.17     <none>        Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.1-k3s1
kube-1   Ready    <none>   14s   v1.19.4+k3s1   10.0.2.16     <none>        Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.1-k3s1
ansible@kube-0:~$
```

**Get Pods**:

```text
ansible@kube-0:~$ sudo kubectl get pods -o wide --all-namespaces
NAMESPACE     NAME                                     READY   STATUS      RESTARTS   AGE   IP          NODE     NOMINATED NODE   READINESS GATES
kube-system   local-path-provisioner-7ff9579c6-72j8x   1/1     Running     0          55s   10.42.2.2   kube-1   <none>           <none>
kube-system   metrics-server-7b4f8b595-lkspj           1/1     Running     0          55s   10.42.1.2   kube-2   <none>           <none>
kube-system   helm-install-traefik-b6vnt               0/1     Completed   0          55s   10.42.0.3   kube-0   <none>           <none>
kube-system   coredns-66c464876b-llsh7                 1/1     Running     0          55s   10.42.0.2   kube-0   <none>           <none>
kube-system   svclb-traefik-jrqg7                      2/2     Running     0          27s   10.42.1.3   kube-2   <none>           <none>
kube-system   svclb-traefik-gh65q                      2/2     Running     0          27s   10.42.0.4   kube-0   <none>           <none>
kube-system   svclb-traefik-5z7zp                      2/2     Running     0          27s   10.42.2.3   kube-1   <none>           <none>
kube-system   traefik-5dd496474-l2k74                  1/1     Running     0          27s   10.42.1.4   kube-2   <none>           <none>
```
154
server/ansible/roles/xanmanning.k3s/documentation/quickstart-ha-cluster.md
Normal file
@@ -0,0 +1,154 @@
# Quickstart: K3s cluster with a HA control plane using embedded etcd

This is the quickstart guide to creating your own 3-node k3s cluster with a
highly available control plane using the embedded etcd datastore.
The control plane nodes will all be workers as well.

:hand: This example requires your Ansible user to be able to connect to the
servers over SSH using key-based authentication. The user also has an entry
in a sudoers file that allows privilege escalation without requiring a
password.

To test this is the case, run the following check, replacing `<ansible_user>`
and `<server_name>`. The expected output is `Works`.

`ssh <ansible_user>@<server_name> 'sudo cat /etc/shadow >/dev/null && echo "Works"'`

For example:

```text
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"'
Works
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $
```

## Directory structure

Our working directory will have the following files:

```text
kubernetes-playground/
  |_ inventory.yml
  |_ ha_cluster.yml
```

## Inventory

Here's a YAML based example inventory for our servers called `inventory.yml`:

```yaml
---

# We're adding k3s_control_node to each host, this can be done in host_vars/
# or group_vars/ as well - but for simplicity we are setting it here.
k3s_cluster:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
      k3s_control_node: true
    kube-1:
      ansible_user: ansible
      ansible_host: 10.10.9.3
      ansible_python_interpreter: /usr/bin/python3
      k3s_control_node: true
    kube-2:
      ansible_user: ansible
      ansible_host: 10.10.9.4
      ansible_python_interpreter: /usr/bin/python3
      k3s_control_node: true
```

We can test this works with `ansible -i inventory.yml -m ping all`, expected
result:

```text
kube-0 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
kube-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
kube-2 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
```

## Playbook

Here is our playbook for the k3s cluster (`ha_cluster.yml`):

```yaml
---

- name: Build a cluster with HA control plane
  hosts: k3s_cluster
  vars:
    k3s_become_for_all: true
    k3s_etcd_datastore: true
    k3s_use_experimental: true  # Note this is required for k3s < v1.19.5+k3s1
  roles:
    - role: xanmanning.k3s
```

## Execution

To execute the playbook against our inventory file, we will run the following
command:

`ansible-playbook -i inventory.yml ha_cluster.yml`

The output we can expect is similar to the below, with no failed or unreachable
nodes. The default behavior of this role is to delegate the first play host as
the primary control node, so kube-0 will have more changed tasks than others:

```text
PLAY RECAP *******************************************************************************************************
kube-0                     : ok=53   changed=8    unreachable=0    failed=0    skipped=30   rescued=0    ignored=0
kube-1                     : ok=47   changed=10   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0
kube-2                     : ok=47   changed=9    unreachable=0    failed=0    skipped=28   rescued=0    ignored=0
```

## Testing

After logging into any of the servers (it doesn't matter which), we can test
that k3s is running across the cluster, that all nodes are ready and that
everything is ready to execute our Kubernetes workloads by running the
following:

- `sudo kubectl get nodes -o wide`
- `sudo kubectl get pods -o wide --all-namespaces`

:hand: Note we are using `sudo` because we need to be root to access the
kube config for this node. This behavior can be changed by specifying
`write-kubeconfig-mode: 0644` in `k3s_server`.

**Get Nodes**:

```text
ansible@kube-0:~$ sudo kubectl get nodes -o wide
NAME     STATUS   ROLES         AGE     VERSION        INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION     CONTAINER-RUNTIME
kube-0   Ready    etcd,master   2m58s   v1.19.4+k3s1   10.10.9.2     10.10.9.2     Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.1-k3s1
kube-1   Ready    etcd,master   2m22s   v1.19.4+k3s1   10.10.9.3     10.10.9.3     Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.1-k3s1
kube-2   Ready    etcd,master   2m10s   v1.19.4+k3s1   10.10.9.4     10.10.9.4     Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.1-k3s1
```

**Get Pods**:

```text
ansible@kube-0:~$ sudo kubectl get pods -o wide --all-namespaces
NAMESPACE     NAME                                     READY   STATUS      RESTARTS   AGE     IP          NODE     NOMINATED NODE   READINESS GATES
kube-system   coredns-66c464876b-rhgn6                 1/1     Running     0          3m38s   10.42.0.2   kube-0   <none>           <none>
kube-system   helm-install-traefik-vwglv               0/1     Completed   0          3m39s   10.42.0.3   kube-0   <none>           <none>
kube-system   local-path-provisioner-7ff9579c6-d5xpb   1/1     Running     0          3m38s   10.42.0.5   kube-0   <none>           <none>
kube-system   metrics-server-7b4f8b595-nhbt8           1/1     Running     0          3m38s   10.42.0.4   kube-0   <none>           <none>
kube-system   svclb-traefik-9lzcq                      2/2     Running     0          2m56s   10.42.1.2   kube-1   <none>           <none>
kube-system   svclb-traefik-vq487                      2/2     Running     0          2m45s   10.42.2.2   kube-2   <none>           <none>
kube-system   svclb-traefik-wkwkk                      2/2     Running     0          3m1s    10.42.0.7   kube-0   <none>           <none>
kube-system   traefik-5dd496474-lw6x8                  1/1     Running     0          3m1s    10.42.0.6   kube-0   <none>           <none>
```
121
server/ansible/roles/xanmanning.k3s/documentation/quickstart-single-node.md
Normal file
@@ -0,0 +1,121 @@
# Quickstart: K3s single node
|
||||
|
||||
This is the quickstart guide to creating your own single-node k3s "cluster".
|
||||
|
||||
:hand: This example requires your Ansible user to be able to connect to the
|
||||
server over SSH using key-based authentication. The user is also has an entry
|
||||
in a sudoers file that allows privilege escalation without requiring a
|
||||
password.
|
||||
|
||||
To test this is the case, run the following check replacing `<ansible_user>`
|
||||
and `<server_name>`. The expected output is `Works`
|
||||
|
||||
`ssh <ansible_user>@<server_name> 'sudo cat /etc/shadow >/dev/null && echo "Works"'`
|
||||
|
||||
For example:
|
||||
|
||||
```text
|
||||
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"'
|
||||
Works
|
||||
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $
|
||||
```
|
||||
|
||||
## Directory structure
|
||||
|
||||
Our working directory will have the following files:
|
||||
|
||||
```text
|
||||
kubernetes-playground/
|
||||
|_ inventory.yml
|
||||
|_ single_node.yml
|
||||
```
|
||||
|
||||

## Inventory

Here's a YAML-based example inventory for our server called `inventory.yml`:

```yaml
---

k3s_cluster:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
```
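
As a side note, not needed for this quickstart: if more hosts are added later,
connection settings shared by every node can be hoisted into the group's
`vars:` block instead of being repeated per host. A sketch:

```yaml
k3s_cluster:
  vars:
    ansible_user: ansible
    ansible_python_interpreter: /usr/bin/python3
  hosts:
    kube-0:
      ansible_host: 10.10.9.2
```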

We can test this works with `ansible -i inventory.yml -m ping all`; expected
result:

```text
kube-0 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
```

## Playbook

Here is our playbook for a single-node k3s cluster (`single_node.yml`):

```yaml
---

- name: Build a single node k3s cluster
  hosts: kube-0
  vars:
    k3s_become_for_all: true
  roles:
    - role: xanmanning.k3s
```
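
If you want reproducible builds, the same `vars:` block can also pin the k3s
release via `k3s_release_version`, a variable the role documents. A sketch
(the version shown is only an example):

```yaml
- name: Build a single node k3s cluster
  hosts: kube-0
  vars:
    k3s_become_for_all: true
    # Pin the release instead of tracking the role's default channel
    k3s_release_version: "v1.19.4+k3s1"
  roles:
    - role: xanmanning.k3s
```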

## Execution

To execute the playbook against our inventory file, we will run the following
command:

`ansible-playbook -i inventory.yml single_node.yml`

The output we can expect is similar to the below, with no failed or unreachable
nodes:

```text
PLAY RECAP *******************************************************************************************************
kube-0                     : ok=39   changed=8    unreachable=0    failed=0    skipped=39   rescued=0    ignored=0
```
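
To sanity-check a run first, `ansible-playbook -i inventory.yml single_node.yml --check`
performs a dry run (check mode is best-effort, so some tasks may be skipped or
behave differently), and adding `-v` raises verbosity when debugging failures.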

## Testing

After logging into the server, we can test that k3s is running and that it is
ready to execute our Kubernetes workloads by running the following:

- `sudo kubectl get nodes`
- `sudo kubectl get pods -o wide --all-namespaces`

:hand: Note we are using `sudo` because we need to be root to access the
kube config for this node. This behavior can be changed by specifying
`write-kubeconfig-mode: 0644` in `k3s_server`.

**Get Nodes**:

```text
ansible@kube-0:~$ sudo kubectl get nodes
NAME     STATUS   ROLES    AGE     VERSION
kube-0   Ready    master   5m27s   v1.19.4+k3s1
ansible@kube-0:~$
```

**Get Pods**:

```text
ansible@kube-0:~$ sudo kubectl get pods --all-namespaces -o wide
NAMESPACE     NAME                                     READY   STATUS      RESTARTS   AGE     IP          NODE     NOMINATED NODE   READINESS GATES
kube-system   metrics-server-7b4f8b595-k692h           1/1     Running     0          9m38s   10.42.0.2   kube-0   <none>           <none>
kube-system   local-path-provisioner-7ff9579c6-5lgzb   1/1     Running     0          9m38s   10.42.0.3   kube-0   <none>           <none>
kube-system   coredns-66c464876b-xg42q                 1/1     Running     0          9m38s   10.42.0.5   kube-0   <none>           <none>
kube-system   helm-install-traefik-tdpcs               0/1     Completed   0          9m38s   10.42.0.4   kube-0   <none>           <none>
kube-system   svclb-traefik-hk248                      2/2     Running     0          9m4s    10.42.0.7   kube-0   <none>           <none>
kube-system   traefik-5dd496474-bf4kv                  1/1     Running     0          9m4s    10.42.0.6   kube-0   <none>           <none>
```

32
server/ansible/roles/xanmanning.k3s/handlers/main.yml
Normal file
@@ -0,0 +1,32 @@
---

- name: reload systemd
  ansible.builtin.systemd:
    daemon_reload: true
    scope: "{{ k3s_systemd_context }}"
  # Escalate when k3s_become_for_systemd is explicitly true; when it is
  # undefined (None), the three-argument ternary falls back to the global
  # k3s_become_for_all toggle.
  become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}"

- name: restart k3s
  ansible.builtin.systemd:
    name: k3s
    state: restarted
    scope: "{{ k3s_systemd_context }}"
    enabled: "{{ k3s_start_on_boot }}"
  retries: 3
  delay: 3
  register: k3s_systemd_restart_k3s
  # Tolerate a failed restart in check mode, where the unit may not exist yet.
  failed_when:
    - k3s_systemd_restart_k3s is not success
    - not ansible_check_mode
  become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}"

- name: restart docker
  ansible.builtin.systemd:
    name: docker
    state: restarted
    enabled: true
  register: k3s_systemd_restart_docker
  failed_when:
    - k3s_systemd_restart_docker is not success
    - not ansible_check_mode
  become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}"
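
Handlers like these only fire when notified. For context, a task elsewhere in
the role would trigger them roughly like this (the task and template names
below are hypothetical, shown only to illustrate the notify mechanism):

```yaml
- name: Deploy k3s service unit
  ansible.builtin.template:
    src: k3s.service.j2   # hypothetical template name
    dest: /etc/systemd/system/k3s.service
    mode: "0644"
  notify:
    - reload systemd
    - restart k3s
```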
@@ -0,0 +1,2 @@
install_date: Sat Mar 20 23:31:38 2021
version: v2.8.0

83
server/ansible/roles/xanmanning.k3s/meta/main.yml
Normal file
@@ -0,0 +1,83 @@
---

galaxy_info:
  role_name: k3s
  namespace: xanmanning
  author: Xan Manning
  description: Ansible role for installing k3s as either a standalone server or HA cluster
  company: Pyrat Ltd.
  github_branch: main

  # If the issue tracker for your role is not on github, uncomment the
  # next line and provide a value
  # issue_tracker_url: http://example.com/issue/tracker

  # Some suggested licenses:
  # - BSD (default)
  # - MIT
  # - GPLv2
  # - GPLv3
  # - Apache
  # - CC-BY
  license: BSD

  min_ansible_version: '2.9'

  # If this is a Container Enabled role, provide the minimum Ansible Container version.
  # min_ansible_container_version:

  # Optionally specify the branch Galaxy will use when accessing the GitHub
  # repo for this role. During role install, if no tags are available,
  # Galaxy will use this branch. During import Galaxy will access files on
  # this branch. If Travis integration is configured, only notifications for this
  # branch will be accepted. Otherwise, in all cases, the repo's default branch
  # (usually main) will be used.
  # github_branch:

  #
  # platforms is a list of platforms, and each platform has a name and a list of versions.
  #
  platforms:
    - name: Archlinux
      versions:
        - all
    - name: EL
      versions:
        - 7
        - 8
    - name: Amazon
    - name: Fedora
      versions:
        - 29
        - 30
        - 31
    - name: Debian
      versions:
        - buster
        - jessie
        - stretch
    - name: SLES
      versions:
        - 15
    - name: Ubuntu
      versions:
        - xenial
        - bionic

  galaxy_tags:
    - k3s
    - k8s
    - kubernetes
    - containerd
    - cluster
    - lightweight
    # List tags for your role here, one per line. A tag is a keyword that describes
    # and categorizes the role. Users find roles by searching for tags. Be sure to
    # remove the '[]' above, if you add tags to this list.
    #
    # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
    # Maximum 20 tags per role.

dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.
@@ -0,0 +1,16 @@
---
- name: Converge
  hosts: node*
  become: true
  vars:
    molecule_is_test: true
    k3s_build_cluster: false
    k3s_install_dir: /opt/k3s/bin
    k3s_config_file: /opt/k3s/etc/k3s.yaml
    k3s_server:
      data-dir: /var/lib/k3s-io
      default-local-storage-path: /var/lib/k3s-io/local-storage
    k3s_server_manifests_templates:
      - "molecule/autodeploy/templates/00-ns-monitoring.yml.j2"
  roles:
    - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
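
The interesting variable in this scenario is `k3s_server_manifests_templates`:
the role renders each listed template into the k3s server's auto-deploy
manifests directory (under the overridden `data-dir` above), so the
`monitoring` namespace template that appears later in this diff should be
created in the cluster automatically once k3s starts.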
@@ -0,0 +1,44 @@
---

dependency:
  name: galaxy
driver:
  name: docker
lint: |
  set -e
  yamllint -s .
  ansible-lint --exclude molecule/
platforms:
  - name: node1
    image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest"
    command: ${MOLECULE_DOCKER_COMMAND:-""}
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    privileged: true
    pre_build_image: ${MOLECULE_PREBUILT:-true}
    networks:
      - name: k3snet
  - name: node2
    image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest"
    command: ${MOLECULE_DOCKER_COMMAND:-""}
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    privileged: true
    pre_build_image: ${MOLECULE_PREBUILT:-true}
    networks:
      - name: k3snet
  - name: node3
    image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest"
    command: ${MOLECULE_DOCKER_COMMAND:-""}
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    privileged: true
    pre_build_image: ${MOLECULE_PREBUILT:-true}
    networks:
      - name: k3snet
provisioner:
  name: ansible
  options:
    verbose: true
verifier:
  name: ansible
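
With a scenario file like this in place, the suite would typically be driven
with `molecule test` from the role directory; the target distribution can be
swapped through the `MOLECULE_DISTRO` environment variable the file already
reads, e.g. `MOLECULE_DISTRO=ubuntu2004 molecule test` (assuming Molecule and
its Docker driver are installed, as covered by the install notes further down).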
@@ -0,0 +1,9 @@
---
- name: Prepare
  hosts: node*
  become: true
  tasks:
    - name: Ensure apt cache is updated
      ansible.builtin.apt:
        update_cache: true
      when: ansible_pkg_mgr == 'apt'
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: monitoring
@@ -0,0 +1,9 @@
---
# This is an example playbook to execute Ansible tests.

- name: Verify
  hosts: all
  tasks:
    - name: Example assertion
      ansible.builtin.assert:
        that: true
@@ -0,0 +1,12 @@
---
- name: Converge
  hosts: all
  become: true
  vars:
    pyratlabs_issue_controller_dump: true
  pre_tasks:
    - name: Ensure k3s_debug is set
      ansible.builtin.set_fact:
        k3s_debug: true
  roles:
    - xanmanning.k3s
@@ -0,0 +1,44 @@
---

dependency:
  name: galaxy
driver:
  name: docker
lint: |
  set -e
  yamllint -s .
  ansible-lint --exclude molecule/
platforms:
  - name: node1
    image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest"
    command: ${MOLECULE_DOCKER_COMMAND:-""}
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    privileged: true
    pre_build_image: ${MOLECULE_PREBUILT:-true}
    networks:
      - name: k3snet
  - name: node2
    image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest"
    command: ${MOLECULE_DOCKER_COMMAND:-""}
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    privileged: true
    pre_build_image: ${MOLECULE_PREBUILT:-true}
    networks:
      - name: k3snet
  - name: node3
    image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest"
    command: ${MOLECULE_DOCKER_COMMAND:-""}
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    privileged: true
    pre_build_image: ${MOLECULE_PREBUILT:-true}
    networks:
      - name: k3snet
provisioner:
  name: ansible
  options:
    verbose: true
verifier:
  name: ansible
@@ -0,0 +1,8 @@
---
- name: Prepare
  hosts: all
  tasks:
    - name: Ensure apt cache is updated
      ansible.builtin.apt:
        update_cache: true
      when: ansible_pkg_mgr == 'apt'
@@ -0,0 +1,9 @@
---
# This is an example playbook to execute Ansible tests.

- name: Verify
  hosts: all
  tasks:
    - name: Example assertion
      ansible.builtin.assert:
        that: true
@@ -0,0 +1,26 @@
# Molecule managed

{% if item.registry is defined %}
FROM {{ item.registry.url }}/{{ item.image }}
{% else %}
FROM {{ item.image }}
{% endif %}

# Detect whichever package manager the base image ships and install the
# minimum needed for Ansible plus systemd-based testing.
RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python systemd sudo bash ca-certificates && apt-get clean; \
    elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python systemd sudo python-devel python*-dnf bash && dnf clean all; \
    elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python systemd sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \
    elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python systemd sudo bash python-xml && zypper clean -a; \
    elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo systemd bash ca-certificates; \
    elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python systemd sudo bash ca-certificates && xbps-remove -O; fi

# Strip systemd units that make no sense inside a container.
RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
    rm -f /lib/systemd/system/multi-user.target.wants/*; \
    rm -f /etc/systemd/system/*.wants/*; \
    rm -f /lib/systemd/system/local-fs.target.wants/*; \
    rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
    rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
    rm -f /lib/systemd/system/basic.target.wants/*; \
    rm -f /lib/systemd/system/anaconda.target.wants/*;

VOLUME ["/sys/fs/cgroup"]
CMD ["/usr/sbin/init"]
@@ -0,0 +1,22 @@
********************************
Docker driver installation guide
********************************

Requirements
============

* Docker Engine

Install
=======

Please refer to the `Virtual environment`_ documentation for installation best
practices. If not using a virtual environment, please consider passing the
widely recommended `'--user' flag`_ when invoking ``pip``.

.. _Virtual environment: https://virtualenv.pypa.io/en/latest/
.. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site

.. code-block:: bash

    $ pip install 'molecule[docker]'
@@ -0,0 +1,9 @@
---
- name: Converge
  hosts: all
  become: true
  vars:
    molecule_is_test: true
    k3s_install_hard_links: true
  roles:
    - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
@@ -0,0 +1,44 @@
---

dependency:
  name: galaxy
driver:
  name: docker
lint: |
  set -e
  yamllint -s .
  ansible-lint --exclude molecule/
platforms:
  - name: node1
    image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest"
    command: ${MOLECULE_DOCKER_COMMAND:-""}
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    privileged: true
    pre_build_image: ${MOLECULE_PREBUILT:-true}
    networks:
      - name: k3snet
  - name: node2
    image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest"
    command: ${MOLECULE_DOCKER_COMMAND:-""}
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    privileged: true
    pre_build_image: ${MOLECULE_PREBUILT:-true}
    networks:
      - name: k3snet
  - name: node3
    image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest"
    command: ${MOLECULE_DOCKER_COMMAND:-""}
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    privileged: true
    pre_build_image: ${MOLECULE_PREBUILT:-true}
    networks:
      - name: k3snet
provisioner:
  name: ansible
  options:
    verbose: true
verifier:
  name: ansible
@@ -0,0 +1,9 @@
---
- name: Converge
  hosts: all
  become: true
  vars:
    molecule_is_test: true
    k3s_state: downloaded
  roles:
    - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
@@ -0,0 +1,9 @@
---
- name: Converge
  hosts: all
  become: true
  vars:
    molecule_is_test: true
    k3s_state: restarted
  roles:
    - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
@@ -0,0 +1,15 @@
---
- name: Converge
  hosts: node1
  become: true
  become_user: k3suser
  vars:
    molecule_is_test: true
    k3s_use_experimental: true
    k3s_server:
      rootless: true
    k3s_agent:
      rootless: true
    k3s_install_dir: "/home/{{ ansible_user_id }}/bin"
  roles:
    - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
@@ -0,0 +1,9 @@
---
- name: Converge
  hosts: all
  become: true
  vars:
    molecule_is_test: true
    k3s_build_cluster: false
  roles:
    - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
@@ -0,0 +1,9 @@
---
- name: Converge
  hosts: all
  become: true
  vars:
    molecule_is_test: true
    k3s_state: started
  roles:
    - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
@@ -0,0 +1,9 @@
---
- name: Converge
  hosts: all
  become: true
  vars:
    molecule_is_test: true
    k3s_state: stopped
  roles:
    - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
@@ -0,0 +1,9 @@
---
- name: Converge
  hosts: all
  become: true
  vars:
    molecule_is_test: true
    k3s_state: uninstalled
  roles:
    - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
@@ -0,0 +1,23 @@
---
- name: Prepare
  hosts: node1
  become: true
  tasks:
    - name: Ensure a user group exists
      ansible.builtin.group:
        name: user
        state: present

    - name: Ensure a normal user exists
      ansible.builtin.user:
        name: k3suser
        group: user
        state: present

    - name: Ensure a normal user has bin directory
      ansible.builtin.file:
        path: /home/k3suser/bin
        state: directory
        owner: k3suser
        group: user
        mode: 0700
@@ -0,0 +1,8 @@
---
- name: Prepare
  hosts: all
  tasks:
    - name: Ensure apt cache is updated
      ansible.builtin.apt:
        update_cache: true
      when: ansible_pkg_mgr == 'apt'
@@ -0,0 +1,14 @@
import os

import testinfra.utils.ansible_runner

testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')


def test_hosts_file(host):
    f = host.file('/etc/hosts')

    assert f.exists
    assert f.user == 'root'
    assert f.group == 'root'
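
One note on this default Testinfra stub: it only asserts ownership of
`/etc/hosts`, and the Molecule scenarios above configure the Ansible verifier
rather than Testinfra, so this file appears to be leftover scaffolding; it
would only run if a scenario's `verifier` were switched to Testinfra.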

Binary file not shown.
Some files were not shown because too many files have changed in this diff.