new talos cluster

This commit is contained in:
auricom
2022-11-19 04:47:32 +01:00
parent 42346bd99b
commit 4ac38f95e9
548 changed files with 1642 additions and 2331 deletions

View File

@@ -0,0 +1,10 @@
# .ansible-lint
# ansible-lint configuration for the playbooks/roles in this repo.
exclude_paths:
  # Third-party galaxy role — not ours to lint.
  - ~/.ansible/roles/xanmanning.k3s
skip_list:
  # Formatting rules intentionally delegated to the repo's own style.
  - yaml[indentation]
  - yaml[line-length]
warn_list:
  # Downgraded to warnings: raw/command usage is deliberate in places
  # (e.g. bootstrapping hosts that have no Python yet).
  - command-instead-of-module
  - command-instead-of-shell
  - unnamed-task

View File

@@ -0,0 +1,2 @@
#shellcheck disable=SC2148,SC2155
export ANSIBLE_CONFIG=$(expand_path ./ansible.cfg)

View File

@@ -0,0 +1,35 @@
# ansible.cfg — read by Ansible from the repo root
# (ANSIBLE_CONFIG is exported via .envrc).
[defaults]
# General settings
# Disable cowsay banners in output.
nocows = True
executable = /bin/bash
# Render task results as YAML for readability.
stdout_callback = yaml
# Inventory group names contain dashes (e.g. truenas-instances);
# silence the group-name transformation warning.
force_valid_group_names = ignore
# File/Directory settings
log_path = ~/.ansible/ansible.log
inventory = ./inventory
roles_path = ~/.ansible/roles:./roles
collections_path = ~/.ansible/collections
remote_tmp = ~/.ansible/tmp
local_tmp = ~/.ansible/tmp
# Fact Caching settings
fact_caching = jsonfile
fact_caching_connection = ~/.ansible/facts_cache
# SSH settings
remote_port = 22
timeout = 60
# NOTE(review): host key verification disabled — common for homelab
# re-provisioning, but confirm this is intentional.
host_key_checking = False
# Plugin settings
# community.sops vars plugin transparently decrypts sops-encrypted vars files.
vars_plugins_enabled = host_group_vars,community.sops.sops

[inventory]
# Fail (not just warn) when an inventory source cannot be parsed.
unparsed_is_failed = true

[privilege_escalation]
become = True

[ssh_connection]
# NOTE(review): scp_if_ssh is deprecated in newer ansible-core in favour of
# ssh_transfer_method — confirm the targeted Ansible version.
scp_if_ssh = smart
retries = 3
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o Compression=yes -o ServerAliveInterval=15s
pipelining = True
# %% is a literal % escaped for configparser interpolation.
control_path = %(directory)s/%%h-%%r

View File

@@ -0,0 +1,24 @@
kind: Secret
secret_domain: ENC[AES256_GCM,data:SjdnR9pDjveodvo=,iv:GKvdD7c3bmaQN+CAYoKwAy78em9vYljGyl6VfGmJk9E=,tag:hz92J7d1NokEeyB6vxr3Uw==,type:str]
secret_cluster_domain: ENC[AES256_GCM,data:o+bvKkMvPfZ9+oobxsZj,iv:iJTqLF0+3v/kMHWJIUXQK3++CoLI+fC6IOrQgpiXofw=,tag:XWEid6zEhdpxka88rW2mkw==,type:str]
secret_email_domain: ENC[AES256_GCM,data:xQwrd9Tgcgpq+I63KA8=,iv:w8fs1kXFwuRBNiswZMu5i/bOazqUPRxEwMWm0z/igxg=,tag:FaWpGtK7ldOEcHgXxZX6/A==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB0dGgya0lVNUtvMEhmWFpm
dE8wdkppSEZiMjVteS9pZkxFaUltQ0VlUzNFCk1oVzVHTVIxVnIvL21YemtZVmJz
a3lmMnJaNGI2NXlUKzduS1ZVa1o5amcKLS0tICtLS2pRZjk4U285TzJnV0J3MUkw
c3JkOFZzYnpINjQ5QnNkaE9IYUdXL3MKsBelDv/z5nTYC6/1Zm8kmzqEoLBVPnhy
v0v/6n1GksmzslbNdKhy+xtxHYrqouhc2P4hNi0R8p8u76RXERN5fg==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2022-09-24T21:41:39Z"
mac: ENC[AES256_GCM,data:YWFS0eyejY3d7HrAewpKhs4Z0ATLZRFAhx/hO8+7OMHnCw+LSXzv0YCygVOTilUJ6By56CRwqF0B9gY/zQUF9mCklyFeHpogmPL92cbAe/gsgKpJI+Nnqrdrch2J8gRv485NI8EQ8sYqSZ0RNsyDiOOyY3OW86L4vqZBqb31O/4=,iv:EGKIAUqY7UQU2+1qpo2VYMvAMomn6vbmGv3uKCpLOOs=,tag:4MNWlJ5Knbymkr/T22P+FA==,type:str]
pgp: []
unencrypted_regex: ^(kind)$
version: 3.7.3

View File

@@ -0,0 +1,22 @@
kind: Secret
wireguard_private_key: ENC[AES256_GCM,data:n7+yDJlb50mm2CiFRJ8YbvtzZaJOD2Hlz1/jbwtCSerRPTbJpDnCaL78EdI=,iv:5D8M8lKJPiduyGp6D2Woi/VEHkAVHi3v5NB2LRY+UNA=,tag:NkvkhueDrDf/1Ly9zv5YCw==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBPNWlaV1YvWUw0NEJOR2Rz
aHd5eU9SdjFuTDgyZDhzUjVIMmFMczg5MmlZCm5vT1VTdjh4WkhCNWsrOG9SaWFM
L0FpSGVuR3hPN04zNHRCd3JMQXVLZVEKLS0tIFFhY1plTzdScmJrWW8xMXpIUXBP
RHR1bnp1VXZJNUI5dmVXcXRvU2NFem8KFdpVMZL4By87eR2mFB5P2ViZxA04p2uI
oe1Wg5bmqLNsfr+Z/Ai6Xc8D9ojuPvNXUkrzdLq5i6M+mi1ultazxQ==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2022-07-22T12:36:41Z"
mac: ENC[AES256_GCM,data:Pdlc1pFCdB6C4Zzm2HkBh8NJn/uE3KVXwWmWlUqbjHRRCqxED1X7lMVxNHgy/ZmmuB1StoZrzwGUVTGRhpcWGX9D614TrKgjPtkr4dxdshYIfIXPsskVnNfULQcvitTjprLj3JKXbZgjO86hGo5c1SgZpEiapuNdvYSHH6EGjyU=,iv:72i8p3q9Tg1kU6BExNtlakXLLt19Aic5xmgU2Hv2VqI=,tag:yu0KkQVK2/Z0mr/scwIekQ==,type:str]
pgp: []
unencrypted_regex: ^(kind)$
version: 3.7.3

View File

@@ -0,0 +1,25 @@
kind: Secret
root_api_key: ENC[AES256_GCM,data:e+g6jvxD9kBSYVbzGXR0QZZMAnxndPu04Dhs3UjNsjHyq+GQRlapPJDQmnTWFa11KaEK3lOiSmU4yxcRjbgG2t3a,iv:mLG+dFHrmndRm5fT4KU+TIOMiAg/urQ4Zv3YaRaoVlg=,tag:DXTWollNdF4o2Pe2qdyufw==,type:str]
ansible_host: ENC[AES256_GCM,data:ldsDTnydWPMnAnOiSlVrkiiL6w==,iv:luNgXdV3uBRaGzBIlw4E5UrZqKBaakgwc+9YC9xXInM=,tag:MldHmJpsOqe7oJMA83Xm9g==,type:str]
ansible_password: ENC[AES256_GCM,data:6F+H0sO8BP7QSZxE6hE=,iv:GOMmcmYZVbT+UbjmHZf4f8jJaBEKV7JWDVpoMQ0QPsI=,tag:YZHl5Sy0wMLibgN7wJ7SNw==,type:str]
ansible_become_pass: ENC[AES256_GCM,data:KFih2YRvhMLDao5fQ+Q=,iv:cv54gnuCtg6Nt/XbUJ2osNnvPTGhnpKLc5btMY/cSW8=,tag:uxgxAj6WLqms+S2N677kyg==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBPVy9DRjhqOW05Wm4rNXZo
bFJxem9UZjNSQW5UaTRZaWQ1clZQSHJrNHpVCmo3Y0RPd1BRRC9ZZHJ0SndSUXJv
UkpPWTNOUWFPL1hCUGJrTFBPZml5QncKLS0tIGI5UUJKMXR0d1d3ZzRDSURuWVFl
ZFlyQ1lGbnVPaSs4cytQYzNwRnJabmcKP0ogZqsaoD6heCqmObwttBgE039aLqe2
R55NPkQJJyFSbDbdDmPApE4IwtXay54QGw2RR4AxOZW4G2dWhdzP3w==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2022-07-21T19:48:24Z"
mac: ENC[AES256_GCM,data:nEaUZqbbRmmU69uLvsJODfzG/LmehP+B9PV1aVxLJD66VJrZR/eO70NohrAGC49PPJgt/I92NJmFLYZ6vtyz/IMTPSEckv/mxHR0U7AQ8+CSnwa8Alzd85OAa9fq4XZ17BBnuT+wBHdPq1H99zLw08MXShCxzx/1ygtb58DDj+k=,iv:5VtAIHJIxONYimmiakxZL12M6+Rig9urEVVAQcEBcbk=,tag:ojoIcXajAXYeTB3vOTIYBw==,type:str]
pgp: []
unencrypted_regex: ^(kind)$
version: 3.7.3

View File

@@ -0,0 +1,3 @@
main_nas: false
pool_name: vol1
snapshots_interval: "daily:14,weekly:12,monthly:12,yearly:3"

View File

@@ -0,0 +1,24 @@
kind: Secret
root_api_key: ENC[AES256_GCM,data:Fhj1MGeHxe/A6O7uVjMrCEu7J4rsiWrhbXgbAenb5CunoRPu0XLV/227WAFc4wFkboFNnt3bjzugvdvM5w/0JSry,iv:7uuHkrSKGShhIso8RgIJsOSYOxBiyyM/D5Dg+IGDh1Y=,tag:dP4gfIIUAEBUm91h5IHSug==,type:str]
ansible_password: ENC[AES256_GCM,data:zRaOy+b26VWMCVIPKLU=,iv:S+BX0fqVizWTZZr0A4MaXkw/4XhE2Pb+RGPjvnWuUpk=,tag:TUcGk8Hp9Zv17L/pmX4E7g==,type:str]
ansible_become_pass: ENC[AES256_GCM,data:xGVU7dW/MMI9bV6Vz+M=,iv:6/ikVQfHxjdCy5KKT+Yksj/OFws2WRcy8oDI2Oay7Eo=,tag:JOLmvpOAIjIHJ/K7Eaoxjw==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBtVllLOFcwWXVoNXZobFF1
VGJmczlkL1V3blhvcnFzN2V6S1B1Ui81alRNClVEUWFmSWxKbENBRVZJN01PSWM5
d2M3OHFhOGpadEdrWUIxZGpMNTR2aVkKLS0tIE84ZkxzTlBpZVlqR2xQRmM0V0ZR
aG5zWW1XclBOS2cxMkwzZ3c1R1psNGsKzeSHHV7AYXCUNiiXJlBRFVWMZtfK3naj
VRtF22+DYfjumQuwam2ZzhdLQ//1ciHnkJc58dKeTbYUHzC+fWpaZQ==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2022-07-21T19:48:18Z"
mac: ENC[AES256_GCM,data:nBonR9Ab5aY+F7w0HE+TRLScRtF5cQNxh3Uvc7jewiLnieolRQtfNiGzKk4YRgqFV8zRTbwS0jvpiqynhxl/ctIKWl2odVDrNkZljidn3jbSz5HUp+f6zxP3DCRXzsBFpunDT8CSdHBhdUWv+82WtFwg2pLH+nTtY11QkH4rQQk=,iv:ILeqDNEEPnb0serEObPMA2LC16ddScH1NwOiZ0M0EHo=,tag:puyv0jvBkCm/X/za6u3oVA==,type:str]
pgp: []
unencrypted_regex: ^(kind)$
version: 3.7.3

View File

@@ -0,0 +1,5 @@
main_nas: true
pool_name: storage
service_s3: true
snapshots_interval: "daily:14,weekly:12,monthly:3"
postgres_version: 14

View File

@@ -0,0 +1,47 @@
---
all:
hosts:
localhost:
ansible_connection: local
ansible_python_interpreter: /usr/bin/python3
coreelec:
ansible_host: coreelec.{{ secret_domain }}
ansible_user: root
children:
truenas-instances:
hosts:
truenas:
ansible_host: truenas.{{ secret_domain }}
truenas-remote:
ansible_port: 35875
vars:
ansible_user: homelab
truenas-jails:
hosts:
borgserver:
ansible_host: borgserver.{{ secret_domain }}
# postgres:
kubernetes:
children:
master:
hosts:
k3s-master:
ansible_host: 192.168.9.100
ansible_user: fedora
ansible_ssh_port: 22
worker:
hosts:
k3s-worker1:
ansible_host: 192.168.9.105
rook_devices:
- /dev/nvme0n1
k3s-worker2:
ansible_host: 192.168.9.106
rook_devices:
- /dev/nvme0n1
k3s-worker3:
ansible_host: 192.168.9.107
rook_devices:
- /dev/nvme0n1
vars:
ansible_user: fedora

View File

@@ -0,0 +1,17 @@
---
# Bootstrap play: make freshly-provisioned hosts manageable by Ansible.
# Uses only `raw` because the target may not have Python installed yet.
# Fix: "Boostrap" → "Bootstrap" in the play name.
- name: Bootstrap host to enable Ansible playbooks
  hosts: all
  become: true
  become_user: root
  vars:
    # Interpreter path probed on the target.
    python_pwd: /usr/bin/python
    # Package installed when the interpreter is missing.
    python_package: python3
  tasks:
    - name: Check for Python
      ansible.builtin.raw: test -e {{ python_pwd }}
      changed_when: false
      # Never fail here — a non-zero rc just means "install it below".
      failed_when: false
      register: check_python
    - name: Install Python
      # pkg(8) — NOTE(review): assumes FreeBSD/TrueNAS targets; confirm.
      ansible.builtin.raw: pkg install -y {{ python_package }}
      when: check_python.rc != 0

View File

@@ -0,0 +1,7 @@
---
- hosts: coreelec
become: true
gather_facts: true
any_errors_fatal: true
roles:
- role: coreelec

View File

@@ -0,0 +1,7 @@
---
- hosts: truenas-instances
become: false
gather_facts: true
any_errors_fatal: true
roles:
- role: truenas

View File

@@ -0,0 +1,115 @@
---
- hosts: localhost
become: false
gather_facts: true
any_errors_fatal: true
vars:
alacritty:
font_size: 11.0
window_columns: 150
window_lines: 40
tasks:
- name: system | disable password sudo
ansible.builtin.lineinfile:
dest: /etc/sudoers
state: present
regexp: "^%admin"
line: "%admin ALL=(ALL) NOPASSWD: ALL"
validate: visudo -cf %s
become: true
- name: chezmoi | create chezmoi directory
ansible.builtin.file:
state: directory
path: ~/.config/chezmoi
mode: 0700
- name: chezmoi | templating chezmoi.toml
ansible.builtin.blockinfile:
path: ~/.config/chezmoi/chezmoi.toml
create: true
mode: 0700
block: |
[data]
alacritty_font_size = 11.0
alaritty_window_columns = 150
alacritty_window_lines = 40
- name: gnome | create directories
ansible.builtin.file:
state: directory
path: ~/.local/share/fonts
mode: 0700
- name: gnome | download nerd fonts
ansible.builtin.get_url:
url: "{{ item }}"
dest: ~/.local/share/fonts
mode: 0700
loop:
- https://github.com/ryanoasis/nerd-fonts/raw/master/patched-fonts/FiraCode/Retina/complete/Fira%20Code%20Retina%20Nerd%20Font%20Complete.ttf
- https://github.com/ryanoasis/nerd-fonts/raw/master/patched-fonts/FiraCode/Retina/complete/Fira%20Code%20Retina%20Nerd%20Font%20Complete%20Mono.ttf
- name: brew | clone homebrew GitHub repo
ansible.builtin.git:
repo: "https://github.com/Homebrew/brew"
dest: "/home/{{ lookup('env', 'USER') }}/.linuxbrew/Homebrew"
version: "master"
mode: 0775
- name: brew | create bin directory for homebrew
ansible.builtin.file:
path: "/home/{{ lookup('env', 'USER') }}/.linuxbrew/bin"
state: directory
mode: 0775
- name: brew | create a symbolic link for brew
ansible.builtin.file:
src: "/home/{{ lookup('env', 'USER') }}/.linuxbrew/Homebrew/bin/brew"
dest: "/home/{{ lookup('env', 'USER') }}/.linuxbrew/bin/brew"
state: link
- name: shell | make Fish default shell
ansible.builtin.user:
name: "{{ lookup('env', 'USER') }}"
shell: /usr/bin/fish
become: true
- name: vscodium | apt key
ansible.builtin.apt_key:
url: https://gitlab.com/paulcarroty/vscodium-deb-rpm-repo/raw/master/pub.gpg
keyring: /usr/share/keyrings/vscodium-archive-keyring.gpg
- name: vscodium | apt repository
ansible.builtin.apt_repository:
repo: "deb [ signed-by=/usr/share/keyrings/vscodium-archive-keyring.gpg ] https://download.vscodium.com/debs vscodium main"
filename: vscodium
- name: alacritty | apt repository
ansible.builtin.apt_repository:
repo: "{{ item }}"
loop:
- "ppa:mmstick76/alacritty"
- "ppa:fish-shell/release-3"
- name: packages | apt
ansible.builtin.apt:
name:
- python3-pip
- neovim
- tmux
- fd-find
- bat
- fzf
- jq
- npm
- fish
- codium
- alacritty
update_cache: true
become: true
- name: packages | brew
community.general.homebrew:
name:
- lsd
- age
- starship
- shellcheck
- kubectl
- helm
- kustomize
- sops
- gh
- chezmoi
- k9s
- awscli
- kubetcx
- kubens

View File

@@ -0,0 +1,7 @@
---
- hosts: localhost
become: false
gather_facts: true
any_errors_fatal: true
roles:
- role: workstation

View File

@@ -0,0 +1,14 @@
---
collections:
- name: ansible.posix
version: 1.4.0
- name: community.general
version: 6.0.1
- name: kubernetes.core
version: 2.3.2
- name: community.sops
version: 1.4.1
roles:
- name: xanmanning.k3s
src: https://github.com/PyratLabs/ansible-role-k3s.git
version: v3.3.1

View File

@@ -0,0 +1,6 @@
---
root_path: /storage
nfs_shares:
- music
- photo
- video

View File

@@ -0,0 +1,16 @@
#!/bin/bash
# Nightly CoreELEC backup: archive Kodi config/data into BACKUP_PATH,
# then prune archives older than 5 days.
# Variables
# NOTE(review): FLAG_NOTIF is never read in this script — presumably consumed
# elsewhere or vestigial; confirm before removing.
FLAG_NOTIF=false
# $(...) instead of legacy backticks; quote all expansions.
DATE=$(date +%Y%m%d%H%M)
BACKUP_PATH="/storage/backup"
# Abort if we cannot reach the archive root — tar paths below are relative to /.
cd / || exit 1
tar cvf "${BACKUP_PATH}/${DATE}.tar" \
    storage/.kodi storage/.config storage/.cache storage/.ssh \
    --exclude=storage/.kodi/userdata/Thumbnails
# Keep the last 5 backups on disk.
# Fix: the previous `find ${BACKUP_PATH}/*.tar ...` relied on shell globbing
# and errored out when no .tar files existed yet; match inside find instead.
find "${BACKUP_PATH}" -name '*.tar' -mtime +5 -type f -delete

View File

@@ -0,0 +1,13 @@
---
- name: backup | copy script
ansible.builtin.copy:
src: backup.bash
dest: /storage/backup.bash
mode: 0755
- name: backup | crontab
ansible.builtin.cron:
name: "daily backup"
minute: "14"
hour: "4"
job: "/storage/backup.bash && curl -fsS -m 10 --retry 5 -o /dev/null https://healthchecks.{{ secret_cluster_domain }}/ping/aae30879-cfdf-4b90-889f-d4ff69dd8aad"

View File

@@ -0,0 +1,8 @@
---
- ansible.builtin.include_tasks: backup.yml
tags:
- backup
- ansible.builtin.include_tasks: nfs.yml
tags:
- nfs

View File

@@ -0,0 +1,24 @@
---
# Tasks: mount TrueNAS NFS exports on CoreELEC via systemd mount units.
- name: nfs | create directories
  ansible.builtin.file:
    path: "{{ root_path }}/mnt/{{ item }}"
    state: directory
    mode: 0775
  loop: "{{ nfs_shares }}"
- name: nfs | create system.d services
  # systemd requires the unit file name to encode the mount path, hence
  # storage-mnt-<share>.mount for /storage/mnt/<share>.
  ansible.builtin.template:
    src: "storage-nfs.mount"
    dest: "/storage/.config/system.d/storage-mnt-{{ item }}.mount"
    mode: 0775
  loop: "{{ nfs_shares }}"
  register: services
- name: nfs | activate system.d services
  # (Re)start every share's mount whenever any template changed.
  # NOTE(review): `services.changed` works, but `services is changed` is the
  # lint-preferred spelling.
  ansible.builtin.systemd:
    name: storage-mnt-{{ item }}.mount
    state: restarted
    enabled: true
    daemon_reload: true
  loop: "{{ nfs_shares }}"
  when: services.changed

View File

@@ -0,0 +1,16 @@
#====================================================
# systemd mount unit (Ansible/Jinja2 template) for one TrueNAS NFS export.
# Rendered per share ({{ item }}) into /storage/.config/system.d/.
[Unit]
Description=TrueNAS nfs share {{ item }}
# Fix: network-online is a *target*, not a service — "network-online.service"
# does not exist, so the previous Requires= dependency could never be
# satisfied. See systemd.special(7).
Requires=network-online.target
After=network-online.target
Before=kodi.service

[Mount]
What=truenas.{{ secret_domain }}:/mnt/storage/{{ item }}
Where=/storage/mnt/{{ item }}
Options=
Type=nfs

[Install]
WantedBy=multi-user.target
#====================================================

View File

@@ -0,0 +1,10 @@
---
homelab_homedir: "/mnt/{{ pool_name }}/home/homelab"
backups_dir: "/mnt/{{ pool_name }}/backups/"
telegraf_dir: "{{ homelab_homedir }}/telegraf"
scripts_dir: "{{ homelab_homedir }}/scripts"
certificates_dir: "{{ homelab_homedir }}/letsencrypt/{{ secret_domain }}"
ping_ip: 192.168.8.1
wg_interface: wg0-client
dns_hostname: services.{{ secret_domain }}

View File

@@ -0,0 +1,96 @@
#!/usr/bin/env bash
PUID=${PUID:-1000}
PGID=${PGID:-1000}
usermod -o -u "$PUID" borg &>/dev/null
groupmod -o -g "$PGID" borg &>/dev/null
BORG_DATA_DIR=/backups
SSH_KEY_DIR=/keys
BORG_CMD='cd ${BORG_DATA_DIR}/${client_name}; borg serve --restrict-to-path ${BORG_DATA_DIR}/${client_name} ${BORG_SERVE_ARGS}'
AUTHORIZED_KEYS_PATH=/home/borg/.ssh/authorized_keys
# Append only mode?
BORG_APPEND_ONLY=${BORG_APPEND_ONLY:=no}
source /etc/os-release
echo "########################################################"
echo -n " * BorgServer powered by "
borg -V
echo " * Based on k8s-at-home"
echo "########################################################"
echo " * User id: $(id -u borg)"
echo " * Group id: $(id -g borg)"
echo "########################################################"
# Precheck if BORG_ADMIN is set
if [ "${BORG_APPEND_ONLY}" == "yes" ] && [ -z "${BORG_ADMIN}" ] ; then
echo "WARNING: BORG_APPEND_ONLY is active, but no BORG_ADMIN was specified!"
fi
# Precheck directories & client ssh-keys
for dir in BORG_DATA_DIR SSH_KEY_DIR ; do
dirpath=$(eval echo '$'${dir})
echo " * Testing Volume ${dir}: ${dirpath}"
if [ ! -d "${dirpath}" ] ; then
echo "ERROR: ${dirpath} is no directory!"
exit 1
fi
if [ "$(find ${SSH_KEY_DIR}/clients ! -regex '.*/\..*' -a -type f | wc -l)" == "0" ] ; then
echo "ERROR: No SSH-Pubkey file found in ${SSH_KEY_DIR}"
exit 1
fi
done
# Create SSH-Host-Keys on persistent storage, if not exist
mkdir -p ${SSH_KEY_DIR}/host 2>/dev/null
echo " * Checking / Preparing SSH Host-Keys..."
for keytype in ed25519 rsa ; do
if [ ! -f "${SSH_KEY_DIR}/host/ssh_host_${keytype}_key" ] ; then
echo " ** Creating SSH Hostkey [${keytype}]..."
ssh-keygen -q -f "${SSH_KEY_DIR}/host/ssh_host_${keytype}_key" -N '' -t ${keytype}
fi
done
echo "########################################################"
echo " * Starting SSH-Key import..."
# Add every key to borg-users authorized_keys
rm ${AUTHORIZED_KEYS_PATH} &>/dev/null
for keyfile in $(find "${SSH_KEY_DIR}/clients" ! -regex '.*/\..*' -a -type f); do
client_name=$(basename ${keyfile})
mkdir ${BORG_DATA_DIR}/${client_name} 2>/dev/null
echo " ** Adding client ${client_name} with repo path ${BORG_DATA_DIR}/${client_name}"
# If client is $BORG_ADMIN unset $client_name, so path restriction equals $BORG_DATA_DIR
# Otherwise add --append-only, if enabled
borg_cmd=${BORG_CMD}
if [ "${client_name}" == "${BORG_ADMIN}" ] ; then
echo " ** Client '${client_name}' is BORG_ADMIN! **"
unset client_name
elif [ "${BORG_APPEND_ONLY}" == "yes" ] ; then
borg_cmd="${BORG_CMD} --append-only"
fi
echo -n "restrict,command=\"$(eval echo -n \"${borg_cmd}\")\" " >> ${AUTHORIZED_KEYS_PATH}
cat ${keyfile} >> ${AUTHORIZED_KEYS_PATH}
echo >> ${AUTHORIZED_KEYS_PATH}
done
chmod 0600 "${AUTHORIZED_KEYS_PATH}"
echo " * Validating structure of generated ${AUTHORIZED_KEYS_PATH}..."
ERROR=$(ssh-keygen -lf ${AUTHORIZED_KEYS_PATH} 2>&1 >/dev/null)
if [ $? -ne 0 ]; then
echo "ERROR: ${ERROR}"
exit 1
fi
chown -R borg:borg ${BORG_DATA_DIR}
chown borg:borg ${AUTHORIZED_KEYS_PATH}
chmod 600 ${AUTHORIZED_KEYS_PATH}
echo "########################################################"
echo " * Init done!"

View File

@@ -0,0 +1,5 @@
# sshd configuration for the borgserver container.
# Host keys live on the persistent /keys volume (created by the entrypoint
# script if absent, so they survive container rebuilds).
HostKey /keys/host/ssh_host_rsa_key
HostKey /keys/host/ssh_host_ed25519_key
AuthorizedKeysFile .ssh/authorized_keys
Subsystem sftp /usr/libexec/sftp-server
# NOTE(review): root login enabled — confirm this daemon is reachable only by
# trusted backup clients and that authentication is key-only (the entrypoint
# installs restricted `command=` keys, but password auth is not visibly
# disabled here).
PermitRootLogin yes

View File

@@ -0,0 +1,240 @@
#!/usr/bin/env python3
"""
Import and activate a SSL/TLS certificate into FreeNAS 11.1 or later
Uses the FreeNAS API to make the change, so everything's properly saved in the config
database and captured in a backup.
Requires paths to the cert (including the any intermediate CA certs) and private key,
and username, password, and FQDN of your FreeNAS system.
Your private key should only be readable by root, so this script must run with root
privileges. And, since it contains your root password, this script itself should
only be readable by root.
Source: https://github.com/danb35/deploy-freenas
"""
import argparse
import os
import sys
import json
import requests
import time
import configparser
import socket
from datetime import datetime, timedelta
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
parser = argparse.ArgumentParser(description='Import and activate a SSL/TLS certificate into FreeNAS.')
parser.add_argument('-c', '--config', default=(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'deploy_config')), help='Path to config file, defaults to deploy_config.')
args = parser.parse_args()
if os.path.isfile(args.config):
config = configparser.ConfigParser()
config.read(args.config)
deploy = config['deploy']
else:
print("Config file", args.config, "does not exist!")
exit(1)
# We'll use the API key if provided
API_KEY = deploy.get('api_key')
# Otherwise fallback to basic password authentication
USER = "root"
PASSWORD = deploy.get('password')
DOMAIN_NAME = deploy.get('cert_fqdn',socket.gethostname())
FREENAS_ADDRESS = deploy.get('connect_host','localhost')
VERIFY = deploy.getboolean('verify',fallback=False)
PRIVATEKEY_PATH = deploy.get('privkey_path',"/root/.acme.sh/" + DOMAIN_NAME + "/" + DOMAIN_NAME + ".key")
FULLCHAIN_PATH = deploy.get('fullchain_path',"/root/.acme.sh/" + DOMAIN_NAME + "/fullchain.cer")
PROTOCOL = deploy.get('protocol','http://')
PORT = deploy.get('port','80')
FTP_ENABLED = deploy.getboolean('ftp_enabled',fallback=False)
S3_ENABLED = deploy.getboolean('s3_enabled',fallback=False)
now = datetime.now()
cert = "letsencrypt-%s-%s-%s-%s" %(now.year, now.strftime('%m'), now.strftime('%d'), ''.join(c for c in now.strftime('%X') if
c.isdigit()))
# Set some general request params
session = requests.Session()
session.headers.update({
'Content-Type': 'application/json'
})
if API_KEY:
session.headers.update({
'Authorization': f'Bearer {API_KEY}'
})
elif PASSWORD:
session.auth = (USER, PASSWORD)
else:
print ("Unable to authenticate. Specify 'api_key' or 'password' in the config.")
exit(1)
# Load cert/key
with open(PRIVATEKEY_PATH, 'r') as file:
priv_key = file.read()
with open(FULLCHAIN_PATH, 'r') as file:
full_chain = file.read()
# Update or create certificate
r = session.post(
PROTOCOL + FREENAS_ADDRESS + ':' + PORT + '/api/v2.0/certificate/',
verify=VERIFY,
data=json.dumps({
"create_type": "CERTIFICATE_CREATE_IMPORTED",
"name": cert,
"certificate": full_chain,
"privatekey": priv_key,
})
)
if r.status_code == 200:
print ("Certificate import successful")
else:
print ("Error importing certificate!")
print (r.text)
sys.exit(1)
# Sleep for a few seconds to let the cert propagate
time.sleep(5)
# Download certificate list
limit = {'limit': 0} # set limit to 0 to disable paging in the event of many certificates
r = session.get(
PROTOCOL + FREENAS_ADDRESS + ':' + PORT + '/api/v2.0/certificate/',
verify=VERIFY,
params=limit
)
if r.status_code == 200:
print ("Certificate list successful")
else:
print ("Error listing certificates!")
print (r.text)
sys.exit(1)
# Parse certificate list to find the id that matches our cert name
cert_list = r.json()
new_cert_data = None
for cert_data in cert_list:
if cert_data['name'] == cert:
new_cert_data = cert_data
cert_id = new_cert_data['id']
break
if not new_cert_data:
print ("Error searching for newly imported certificate in certificate list.")
sys.exit(1)
# Set our cert as active
r = session.put(
PROTOCOL + FREENAS_ADDRESS + ':' + PORT + '/api/v2.0/system/general/',
verify=VERIFY,
data=json.dumps({
"ui_certificate": cert_id,
})
)
if r.status_code == 200:
print ("Setting active certificate successful")
else:
print ("Error setting active certificate!")
print (r.text)
sys.exit(1)
if FTP_ENABLED:
# Set our cert as active for FTP plugin
r = session.put(
PROTOCOL + FREENAS_ADDRESS + ':' + PORT + '/api/v2.0/ftp/',
verify=VERIFY,
data=json.dumps({
"ssltls_certfile": cert,
}),
)
if r.status_code == 200:
print ("Setting active FTP certificate successful")
else:
print ("Error setting active FTP certificate!")
print (r.text)
sys.exit(1)
if S3_ENABLED:
    # Set our cert as active for the S3 (minio) service.
    r = session.put(
        PROTOCOL + FREENAS_ADDRESS + ':' + PORT + '/api/v2.0/s3/',
        verify=VERIFY,
        data=json.dumps({
            "certificate": cert_id,
        }),
    )
    if r.status_code == 200:
        print ("Setting active S3 certificate successful")
    else:
        print ("Error setting active S3 certificate!")
        # Fix: was `print (r)`, which prints only the Response repr
        # (e.g. "<Response [422]>"); print the body like every other
        # error handler in this script.
        print (r.text)
        sys.exit(1)
# Get expired and old certs with same SAN
cert_ids_same_san = set()
cert_ids_expired = set()
for cert_data in cert_list:
if set(cert_data['san']) == set(new_cert_data['san']):
cert_ids_same_san.add(cert_data['id'])
issued_date = datetime.strptime(cert_data['from'], "%c")
lifetime = timedelta(days=cert_data['lifetime'])
expiration_date = issued_date + lifetime
if expiration_date < now:
cert_ids_expired.add(cert_data['id'])
# Remove new cert_id from lists
if cert_id in cert_ids_expired:
cert_ids_expired.remove(cert_id)
if cert_id in cert_ids_same_san:
cert_ids_same_san.remove(cert_id)
# Delete expired and old certificates with same SAN from freenas
for cid in (cert_ids_same_san | cert_ids_expired):
r = session.delete(
PROTOCOL + FREENAS_ADDRESS + ':' + PORT + '/api/v2.0/certificate/id/' + str(cid),
verify=VERIFY
)
for c in cert_list:
if c['id'] == cid:
cert_name = c['name']
if r.status_code == 200:
print ("Deleting certificate " + cert_name + " successful")
else:
print ("Error deleting certificate " + cert_name + "!")
print (r.text)
sys.exit(1)
# Reload nginx with new cert
# If everything goes right, the request fails with a ConnectionError
try:
r = session.post(
PROTOCOL + FREENAS_ADDRESS + ':' + PORT + '/api/v2.0/system/general/ui_restart',
verify=VERIFY
)
if r.status_code == 200:
print ("Reloading WebUI successful")
print ("deploy_freenas.py executed successfully")
else:
print ("Error reloading WebUI!")
print ("{}: {}".format(r.status_code, r.text))
sys.exit(1)
except requests.exceptions.ConnectionError:
print ("Error reloading WebUI!")
sys.exit(1)

View File

@@ -0,0 +1,107 @@
#!/usr/bin/env python3
# clearempty.py - Koen Vermeer <k.vermeer@eyehospital.nl>
# Inspired by rollup.py by Arno Hautala <arno@alum.wpi.edu>
# modifications by Arno Hautala
# This work is licensed under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
# (CC BY-SA-3.0) http://creativecommons.org/licenses/by-sa/3.0/
# This script removes empty snapshots, based on their 'used' property.
# Note that one snapshot's 'used' value may change when another snapshot is
# destroyed. This script iteratively destroys the oldest empty snapshot. It
# does not remove the latest snapshot of each dataset or manual snapshots
import subprocess
import argparse
import sys
from collections import defaultdict
parser = argparse.ArgumentParser(description='Removes empty auto snapshots.')
parser.add_argument('datasets', nargs='+', help='the root dataset(s) from which to remove snapshots')
parser.add_argument('--test', '-t', action="store_true", default=False, help='only display the snapshots that would be deleted, without actually deleting them. Note that due to dependencies between snapshots, this may not match what would really happen.')
parser.add_argument('--recursive', '-r', action="store_true", default=False, help='recursively removes snapshots from nested datasets')
parser.add_argument('--prefix', '-p', action='append', help='list of snapshot name prefixes that will be considered')
args = parser.parse_args()
if not args.prefix:
args.prefix = ['auto']
args.prefix = [prefix+"-" for prefix in set(args.prefix)]
deleted = defaultdict(lambda : defaultdict(lambda : defaultdict(int)))
snapshot_was_deleted = True
while snapshot_was_deleted:
snapshot_was_deleted = False
snapshots = defaultdict(lambda : defaultdict(lambda : defaultdict(int)))
# Get properties of all snapshots of the selected datasets
for dataset in args.datasets:
subp = subprocess.Popen(["zfs", "get", "-Hrpo", "name,property,value", "type,creation,used,freenas:state", dataset], stdout=subprocess.PIPE)
zfs_snapshots = subp.communicate()[0]
if subp.returncode:
print("zfs get failed with RC=%s" % subp.returncode)
sys.exit(1)
for snapshot in zfs_snapshots.splitlines():
name,property,value = snapshot.decode().split('\t',3)
# if the rollup isn't recursive, skip any snapshots from child datasets
if not args.recursive and not name.startswith(dataset+"@"):
continue
try:
dataset,snapshot = name.split('@',2)
except ValueError:
continue
snapshots[dataset][snapshot][property] = value
# Ignore non-snapshots and not-auto-snapshots
# Remove already destroyed snapshots
for dataset in list(snapshots.keys()):
latest = None
latestNEW = None
for snapshot in sorted(snapshots[dataset], key=lambda snapshot: snapshots[dataset][snapshot]['creation'], reverse=True):
if not any(map(snapshot.startswith, args.prefix)) \
or snapshots[dataset][snapshot]['type'] != "snapshot":
del snapshots[dataset][snapshot]
continue
if not latest:
latest = snapshot
del snapshots[dataset][snapshot]
continue
if not latestNEW and snapshots[dataset][snapshot]['freenas:state'] == 'NEW':
latestNEW = snapshot
del snapshots[dataset][snapshot]
continue
if snapshots[dataset][snapshot]['freenas:state'] == 'LATEST':
del snapshots[dataset][snapshot]
continue
if snapshots[dataset][snapshot]['used'] != '0' \
or snapshot in list(deleted[dataset].keys()):
del snapshots[dataset][snapshot]
continue
# Stop if no snapshots are in the list
if not snapshots[dataset]:
del snapshots[dataset]
continue
# destroy the most recent empty snapshot
snapshot = max(snapshots[dataset], key=lambda snapshot: snapshots[dataset][snapshot]['creation'])
if not args.test:
# destroy the snapshot
subprocess.call(["zfs", "destroy", dataset+"@"+snapshot])
deleted[dataset][snapshot] = snapshots[dataset][snapshot]
snapshot_was_deleted = True
for dataset in sorted(deleted.keys()):
if not deleted[dataset]:
continue
print(dataset)
for snapshot in sorted(deleted[dataset].keys()):
print("\t", snapshot, deleted[dataset][snapshot]['used'])

View File

@@ -0,0 +1,262 @@
#!/usr/bin/env python3
# rollup.py - Arno Hautala <arno@alum.wpi.edu>
# This work is licensed under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
# (CC BY-SA-3.0) http://creativecommons.org/licenses/by-sa/3.0/

# For the latest version, visit:
#   https://github.com/fracai/zfs-rollup
#   https://bitbucket.org/fracai/zfs-rollup

# A snapshot pruning script, similar in behavior to Apple's TimeMachine
# Keep hourly snapshots for the last day, daily for the last week, and weekly thereafter.

# TODO:
#   rollup based on local time, not UTC
#     requires pytz, or manually determining and converting time offsets
#   improve documentation

# TEST:

import datetime  # NOTE(review): not referenced in this script — confirm before removing
import calendar  # NOTE(review): not referenced in this script — confirm before removing
import time
import subprocess
import argparse
import sys

from collections import defaultdict

# Built-in retention intervals.
#   max          : number of snapshots retained for this interval (0 = unlimited)
#   abbreviation : single letter printed per-interval in verbose output
#   reference    : strftime() format; snapshots whose creation time renders to
#                  the same string fall into the same retention "bucket"
intervals = {}
intervals['hourly'] = { 'max':24, 'abbreviation':'h', 'reference':'%Y-%m-%d %H' }
intervals['daily'] = { 'max': 7, 'abbreviation':'d', 'reference':'%Y-%m-%d' }
intervals['weekly'] = { 'max': 0, 'abbreviation':'w', 'reference':'%Y-%W' }
intervals['monthly'] = { 'max':12, 'abbreviation':'m', 'reference':'%Y-%m' }
intervals['yearly'] = { 'max':10, 'abbreviation':'y', 'reference':'%Y' }

# Suffix -> minutes multiplier, used to parse user-defined periods like "2h" or "3d".
modifiers = {
    'M' : 1,
    'H' : 60,
    'h' : 60,
    'd' : 60*24,
    'w' : 60*24*7,
    'm' : 60*24*28,
    'y' : 60*24*365,
}

# Default retention policy, used when --intervals is not supplied.
used_intervals = {
    'hourly': intervals['hourly'],
    'daily' : intervals['daily'],
    'weekly': intervals['weekly']
}

parser = argparse.ArgumentParser(description='Prune excess snapshots, keeping hourly for the last day, daily for the last week, and weekly thereafter.')
parser.add_argument('datasets', nargs='+', help='The root dataset(s) from which to prune snapshots')
parser.add_argument('-t', '--test', action="store_true", default=False, help='Only display the snapshots that would be deleted, without actually deleting them')
parser.add_argument('-v', '--verbose', action="store_true", default=False, help='Display verbose information about which snapshots are kept, pruned, and why')
parser.add_argument('-r', '--recursive', action="store_true", default=False, help='Recursively prune snapshots from nested datasets')
parser.add_argument('--prefix', '-p', action='append', help='list of snapshot name prefixes that will be considered')
parser.add_argument('-c', '--clear', action="store_true", default=False, help='remove all snapshots')
parser.add_argument('-i', '--intervals',
    help="Modify and define intervals with which to keep and prune snapshots. Either name existing intervals ("+
        ", ".join(sorted(intervals, key=lambda interval: modifiers[intervals[interval]['abbreviation']]))+"), "+
        "modify the number of those to store (hourly:12), or define new intervals according to interval:count (2h:12). "+
        "Multiple intervals may be specified if comma seperated (hourly,daily:30,2h:12). Available modifier abbreviations are: "+
        ", ".join(sorted(modifiers, key=modifiers.get))
    )
args = parser.parse_args()

# Default to snapshots created by the periodic task ("auto-...").
if not args.prefix:
    args.prefix = ['auto']
# Names are matched as "<prefix>-..."; de-duplicate and normalize.
args.prefix = [prefix+"-" for prefix in set(args.prefix)]

if args.test:
    # a dry run is only useful if it reports what it would have done
    args.verbose = True

if args.intervals:
    # --intervals replaces the default policy entirely.
    used_intervals = {}
    for interval in args.intervals.split(','):
        if interval.count(':') == 1:
            # "<period>:<count>" — adjust a named interval's count, or define a
            # new fixed-length interval (e.g. "2h:12").
            period,count = interval.split(':')
            try:
                int(count)
            except ValueError:
                print("invalid count: "+count)
                sys.exit(1)
            if period in intervals:
                used_intervals[period] = intervals[period]
                used_intervals[period]['max'] = count
            else:
                try:
                    if period[-1] in modifiers:
                        # e.g. "2h" -> 2 * 60 minutes
                        used_intervals[interval] = { 'max' : count, 'interval' : int(period[:-1]) * modifiers[period[-1]] }
                    else:
                        # bare number: interpreted as minutes
                        used_intervals[interval] = { 'max' : count, 'interval' : int(period) }
                except ValueError:
                    print("invalid period: "+period)
                    sys.exit(1)
        elif interval.count(':') == 0 and interval in intervals:
            # bare name of a built-in interval, with its default count
            used_intervals[interval] = intervals[interval]
        else:
            print("invalid interval: "+interval)
            sys.exit(1)

# CLI-defined intervals have no abbreviation; fall back to their full name in
# verbose output.
for interval in used_intervals:
    if 'abbreviation' not in used_intervals[interval]:
        used_intervals[interval]['abbreviation'] = interval
# snapshots[dataset][snapshot][property] -> value
# (properties collected: creation, type, used, freenas:state)
snapshots = defaultdict(lambda : defaultdict(lambda : defaultdict(int)))

for dataset in args.datasets:
    # -H no header, -r recurse, -p numeric (parsable) values, -o column list
    subp = subprocess.Popen(["zfs", "get", "-Hrpo", "name,property,value", "creation,type,used,freenas:state", dataset], stdout=subprocess.PIPE)
    zfs_snapshots = subp.communicate()[0]
    if subp.returncode:
        print("zfs get failed with RC=%s" % subp.returncode)
        sys.exit(1)
    for snapshot in zfs_snapshots.splitlines():
        name,property,value = snapshot.decode().split('\t',3)
        # if the rollup isn't recursive, skip any snapshots from child datasets
        # NOTE(review): 'dataset' is rebound below, so this check compares
        # against the previously parsed dataset on later lines — confirm intent.
        if not args.recursive and not name.startswith(dataset+"@"):
            continue
        try:
            dataset,snapshot = name.split('@',2)
        except ValueError:
            # no '@' — a dataset property row, not a snapshot; skip it
            continue
        # enforce that this is a snapshot starting with one of the requested prefixes
        # (non-matching snapshots are still recorded; a later pass tags them
        # '!PREFIX' so they are never pruned)
        if not any(map(snapshot.startswith, args.prefix)):
            if property == 'creation':
                print("will ignore:\t", dataset+"@"+snapshot)
        snapshots[dataset][snapshot][property] = value
# First pass per dataset (newest first): tag snapshots that must never be
# pruned with a 'keep' reason.
for dataset in list(snapshots.keys()):
    latestNEW = None
    latest = None
    for snapshot in sorted(snapshots[dataset], key=lambda snapshot: snapshots[dataset][snapshot]['creation'], reverse=True):
        if not latest:
            # the newest snapshot is always retained
            latest = snapshot
            snapshots[dataset][snapshot]['keep'] = 'RECENT'
            continue
        if not any(map(snapshot.startswith, args.prefix)) \
            or snapshots[dataset][snapshot]['type'] != "snapshot":
            # not ours to prune: wrong prefix, or not an actual snapshot row
            snapshots[dataset][snapshot]['keep'] = '!PREFIX'
            continue
        if not latestNEW and snapshots[dataset][snapshot]['freenas:state'] == 'NEW':
            # newest replication candidate not yet transferred
            latestNEW = snapshot
            snapshots[dataset][snapshot]['keep'] = 'NEW'
            continue
        if snapshots[dataset][snapshot]['freenas:state'] == 'LATEST':
            # most recently replicated snapshot; needed for incremental sends
            snapshots[dataset][snapshot]['keep'] = 'LATEST'
            continue
    # drop datasets that ended up with no snapshots at all
    if not len(list(snapshots[dataset].keys())):
        del snapshots[dataset]
# Second pass: per dataset, decide which snapshots each interval retains, then
# destroy everything else in contiguous ranges.
for dataset in sorted(snapshots.keys()):
    print(dataset)

    sorted_snapshots = sorted(snapshots[dataset], key=lambda snapshot: snapshots[dataset][snapshot]['creation'])
    most_recent = sorted_snapshots[-1]

    # rollup_intervals[interval][bucket] -> creation epoch of the snapshot
    # retained for that bucket (oldest in the bucket, since we iterate
    # oldest-first and record each bucket only once)
    rollup_intervals = defaultdict(lambda : defaultdict(int))

    for snapshot in sorted_snapshots:
        prune = True
        if args.clear:
            # --clear: retain nothing beyond the 'keep'-tagged snapshots
            continue
        epoch = snapshots[dataset][snapshot]['creation']
        for interval in list(used_intervals.keys()):
            if 'reference' in used_intervals[interval]:
                # calendar-bucket interval (hourly/daily/...): one snapshot per
                # distinct strftime() string, at most 'max' buckets (0 = unlimited)
                reference = time.strftime(used_intervals[interval]['reference'], time.gmtime(float(epoch)))
                if reference not in rollup_intervals[interval]:
                    if int(used_intervals[interval]['max']) != 0 and len(rollup_intervals[interval]) >= int(used_intervals[interval]['max']):
                        # evict the oldest bucket to make room
                        rollup_intervals[interval].pop(sorted(rollup_intervals[interval].keys())[0])
                    rollup_intervals[interval][reference] = epoch
            elif 'interval' in used_intervals[interval]:
                # fixed-length interval: retain a snapshot once ~90% of the
                # interval has elapsed since the previous retained one
                if int(used_intervals[interval]['max']) != 0 and len(rollup_intervals[interval]) >= int(used_intervals[interval]['max']):
                    rollup_intervals[interval].pop(sorted(rollup_intervals[interval].keys())[0])
                if (not rollup_intervals[interval]) or int(sorted(rollup_intervals[interval].keys())[-1]) + (used_intervals[interval]['interval']*60*.9) < int(epoch):
                    rollup_intervals[interval][epoch] = epoch

    # Group prune candidates into runs of consecutive snapshots so each run can
    # be destroyed with a single "zfs destroy first%last" range.
    ranges = list()
    ranges.append(list())
    for snapshot in sorted_snapshots:
        prune = True
        epoch = snapshots[dataset][snapshot]['creation']
        if 'keep' in snapshots[dataset][snapshot]:
            prune = False
            ranges.append(list())  # a retained snapshot ends the current run
        for interval in list(used_intervals.keys()):
            if 'reference' in used_intervals[interval]:
                reference = time.strftime(used_intervals[interval]['reference'], time.gmtime(float(epoch)))
                if reference in rollup_intervals[interval] and rollup_intervals[interval][reference] == epoch:
                    prune = False
                    ranges.append(list())
            elif 'interval' in used_intervals[interval]:
                if epoch in rollup_intervals[interval]:
                    prune = False
                    ranges.append(list())
        if prune or args.verbose:
            print("\t","pruning\t" if prune else " \t", "@"+snapshot, end=' ')
            if args.verbose:
                # one column per interval: its abbreviation if this snapshot is
                # retained by that interval, '-' otherwise; then the keep flag
                # (first letter) and the snapshot's 'used' size
                for interval in list(used_intervals.keys()):
                    if 'reference' in used_intervals[interval]:
                        reference = time.strftime(used_intervals[interval]['reference'], time.gmtime(float(epoch)))
                        if reference in rollup_intervals[interval] and rollup_intervals[interval][reference] == epoch:
                            print(used_intervals[interval]['abbreviation'], end=' ')
                        else:
                            print('-', end=' ')
                    if 'interval' in used_intervals[interval]:
                        if epoch in rollup_intervals[interval]:
                            print(used_intervals[interval]['abbreviation'], end=' ')
                        else:
                            print('-', end=' ')
                if 'keep' in snapshots[dataset][snapshot]:
                    print(snapshots[dataset][snapshot]['keep'][0], end=' ')
                else:
                    print('-', end=' ')
                print(snapshots[dataset][snapshot]['used'])
            else:
                print()
        if prune:
            ranges[-1].append(snapshot)

    # Destroy each non-empty run; "first%last" is zfs' snapshot range syntax.
    for range in ranges:  # NOTE(review): shadows the builtin 'range'
        if not range:
            continue
        to_delete = dataset+'@'+range[0]
        if len(range) > 1:
            to_delete += '%' + range[-1]
        to_delete = to_delete.replace(' ', '')
        if not to_delete:
            continue
        if args.verbose:
            print('zfs destroy ' + to_delete)
        if not args.test:
            # destroy the snapshot
            subprocess.call(['zfs', 'destroy', to_delete])

View File

@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Runs smartctl to report current temperature of all disks.
# Prints a JSON array of {"temperature":N,"disk":"<name>"} objects on stdout.

JSON="["

DISKS=$(/sbin/sysctl -n kern.disks | cut -d= -f2)

for i in ${DISKS}
do
  # FIX: reset TEMP each iteration; previously a disk matching neither pattern
  # silently reused the previous disk's temperature.
  TEMP=""
  # Get temperature from smartctl (requires root).
  [[ "${i}" = *"ada"* ]] && TEMP=$(/usr/local/sbin/smartctl -l scttemp "/dev/${i}" | grep '^Current Temperature:' | awk '{print $3}')
  [[ "${i}" = *"nvd"* ]] && DEVICE_NUMBER=$(echo "${i}" | cut -c 4) && TEMP=$(smartctl -a "/dev/nvme${DEVICE_NUMBER}" | grep 'Temperature:' | head -1 | awk '{print $2}')
  if [ "${TEMP:-0}" -gt 0 ]
  then
    # Plain concatenation; the former $(echo ...) round-trips added nothing
    # and could mangle whitespace.
    JSON="${JSON}{\"temperature\":${TEMP},\"disk\":\"${i}\"},"
  fi
done

# Remove trailing "," on last field.
JSON="${JSON%,}"
echo -e "${JSON}]"

View File

@@ -0,0 +1,7 @@
---
# Handler: restart PostgreSQL inside the "postgres" iocage jail.
# Relies on postgres_jail_ip being registered by the calling tasks.
- name: restart postgresql
  ansible.builtin.service:
    name: postgresql
    state: restarted
  delegate_to: "{{ postgres_jail_ip.stdout }}"
  remote_user: root

View File

@@ -0,0 +1,21 @@
---
# Base directory layout for homelab scripts, telegraf, and per-host backups.
# NOTE(review): backups_dir appears to end with '/' — paths concatenate it
# directly with "servers/..."; confirm in group_vars.
- name: directories | create
  ansible.builtin.file:
    state: directory
    path: "{{ item }}"
    mode: 0775
  loop:
    - "{{ homelab_homedir }}/letsencrypt"
    - "{{ telegraf_dir }}"
    - "{{ backups_dir }}servers/{{ ansible_facts['nodename'] }}"
    - "{{ scripts_dir }}"

# Extra backup targets that only exist on the primary NAS.
- name: directories | truenas
  ansible.builtin.file:
    state: directory
    path: "{{ item }}"
    mode: 0775
  loop:
    - "{{ backups_dir }}servers/coreelec.{{ secret_domain }}"
    - "{{ backups_dir }}servers/opnsense.{{ secret_domain }}"
  when: "main_nas"

View File

@@ -0,0 +1,112 @@
---
# Provision the "borgserver" iocage jail: host-side datasets/mounts, then
# in-jail packages, borg binary, SSH hardening, and sshguard.
- name: jail-borgserver | get jail ip
  ansible.builtin.shell:
    cmd: iocage exec borgserver ifconfig epair0b | grep 'inet' | awk -F ' ' '{ print $2 }'
  changed_when: false
  register: borgserver_jail_ip
  become: true

# Host-side preparation: persistent datasets and their nullfs mounts.
- block:
    - name: jail-borgserver | create zfs pools
      community.general.zfs:
        name: "{{ item }}"
        state: present
      loop:
        - "{{ pool_name }}/jail-mounts"
        - "{{ pool_name }}/jail-mounts/borgserver"
        - "{{ pool_name }}/jail-mounts/borgserver/backups"
        - "{{ pool_name }}/jail-mounts/borgserver/keys"

    - name: jail-borgserver | create empty dirs
      ansible.builtin.shell:
        cmd: iocage exec borgserver mkdir -p /{{ item }}
      loop:
        - backups
        - keys

    - name: jail-borgserver | mount dirs
      ansible.builtin.shell:
        cmd: iocage fstab -a borgserver /mnt/{{ pool_name }}/jail-mounts/borgserver/{{ item }} /{{ item }} nullfs rw 0 0
      loop:
        - backups
        - keys
  become: true

# In-jail configuration, run over SSH directly against the jail's address.
- block:
    - name: jail-borgserver | packages
      community.general.pkgng:
        name:
          # - py39-borgbackup  # superseded by the static upstream binary below
          - sshguard
        state: present

    - name: jail-borgserver | download borg cli
      ansible.builtin.get_url:
        url: https://github.com/borgbackup/borg/releases/download/1.2.1/borg-freebsd64
        dest: /usr/local/bin/borg
        mode: 0755

    - name: jail-borgserver | user borg
      ansible.builtin.user:
        name: borg
        uid: 1000
        state: present

    - name: jail-borgserver | create directories
      ansible.builtin.file:
        path: /home/borg/.ssh
        owner: 1000
        group: 1000
        state: directory

    - name: jail-borgserver | authorized_keys
      ansible.builtin.file:
        path: /home/borg/.ssh/authorized_keys
        owner: 1000
        group: 1000
        state: touch

    - name: jail-borgserver | change folders mod
      ansible.builtin.file:
        path: "{{ item }}"
        owner: 1000
        group: 1000
      loop:
        - /backups
        - /keys

    - name: jail-borgserver | copy sshd_config
      ansible.builtin.copy:
        src: borgserver/sshd_config
        # FIX: removed a stray trailing apostrophe ("sshd_config'") that wrote
        # the file to the wrong path and left sshd running on its defaults.
        dest: /etc/ssh/sshd_config
        mode: 0644

    - name: jail-borgserver | copy borgserver rc.d
      ansible.builtin.copy:
        src: borgserver/rc.d
        dest: /etc/rc.d/borgserver
        mode: 0755

    - name: jail-borgserver | configure sshguard
      community.general.sysrc:
        name: "{{ item.name }}"
        value: "{{ item.value }}"
        state: present
      loop:
        - { name: "sshguard_enable", value: "YES" }
        - { name: "sshguard_danger_thresh", value: "30" }
        - { name: "sshguard_release_interval", value: "600" }
        - { name: "sshguard_reset_interval", value: "7200" }

    - name: jail-borgserver | start sshguard service
      ansible.builtin.service:
        name: sshguard
        state: started

    - name: jail-borgserver | restart sshd service
      ansible.builtin.service:
        name: sshd
        state: restarted
  delegate_to: "{{ borgserver_jail_ip.stdout }}"
  remote_user: root

View File

@@ -0,0 +1,31 @@
---
# Bootstrap a freshly created jail (included per-jail from jails/main.yml with
# loop_var "outside_item"): root SSH access, sshd, and a python/bash baseline
# so Ansible can manage the jail directly afterwards.
- name: jail-prepare | {{ outside_item.item }} | create .ssh directory
  ansible.builtin.shell:
    cmd: iocage exec {{ outside_item.item }} 'mkdir -p /root/.ssh; echo "" > /root/.ssh/authorized_keys; chmod 700 /root/.ssh; chmod 600 /root/.ssh/authorized_keys'
  become: true

- name: jail-prepare | {{ outside_item.item }} | deploy ssh keys
  ansible.builtin.shell:
    cmd: iocage exec {{ outside_item.item }} 'echo "{{ item }}" >> /root/.ssh/authorized_keys'
  loop: "{{ public_ssh_keys }}"
  become: true

- name: jail-prepare | {{ outside_item.item }} | activate sshd
  ansible.builtin.shell:
    cmd: iocage exec {{ outside_item.item }} 'sysrc sshd_enable="YES"'
  become: true

- name: jail-prepare | {{ outside_item.item }} | sshd permit root login
  ansible.builtin.shell:
    cmd: iocage exec {{ outside_item.item }} 'echo "PermitRootLogin yes" >> /etc/ssh/sshd_config'
  become: true

- name: jail-prepare | {{ outside_item.item }} | start sshd
  ansible.builtin.shell:
    cmd: iocage exec {{ outside_item.item }} 'service sshd start'
  become: true

- name: jail-prepare | {{ outside_item.item }} | install packages
  ansible.builtin.shell:
    cmd: iocage exec {{ outside_item.item }} 'pkg install -y python39 bash; ln -s /usr/local/bin/bash /bin/bash'
  become: true

View File

@@ -0,0 +1,41 @@
---
# Ensure every jail listed in the "truenas-jails" inventory group exists, then
# run the one-time init for jails that were just created.
- name: jails | check if jail exist
  ansible.builtin.shell:
    cmd: iocage list | grep {{ item }}
  loop: "{{ groups['truenas-jails'] }}"
  register: jails_check
  changed_when: false
  # grep rc 1 just means "jail not found"; anything else is a real failure
  failed_when: jails_check.rc != 0 and jails_check.rc != 1

- name: jails | is iocage fetch required
  ansible.builtin.set_fact:
    jail_missing: true
  loop: "{{ jails_check.results }}"
  when: item.rc == 1

- block:
    - name: jails | get current FreeBSD release
      ansible.builtin.shell:
        cmd: freebsd-version -k
      register: release
      failed_when: release.rc != 0

    - name: jails | fetch iocage template {{ release.stdout }}
      ansible.builtin.shell:
        cmd: iocage fetch -r {{ release.stdout }}
      become: true

    - name: jails | create jail
      ansible.builtin.shell:
        cmd: iocage create -r {{ release.stdout }} -n {{ item.item }} dhcp=on boot=on
      loop: "{{ jails_check.results }}"
      when: item.rc == 1
      become: true
  # FIX: jail_missing is only set when some jail is absent; default it so the
  # play doesn't fail on an undefined variable when every jail already exists.
  when: jail_missing | default(false)

- name: jails | init jails
  ansible.builtin.include_tasks: init.yml
  loop: "{{ jails_check.results }}"
  loop_control:
    loop_var: outside_item
  when: outside_item.rc == 1

View File

@@ -0,0 +1,60 @@
---
# Configure the postgres jail: TLS certificate, ZFS-friendly settings, network
# listening, and pg_hba access rules.
- name: jail-postgres | get jail ip
  ansible.builtin.shell:
    cmd: iocage exec postgres ifconfig epair0b | grep 'inet' | awk -F ' ' '{ print $2 }'
  changed_when: false
  register: postgres_jail_ip
  become: true

# Copy the wildcard certificate from the NAS side into the jail's data dir
# (the dataset is nullfs-mounted into the jail).
- name: jail-postgres | copy letsencrypt certificate
  ansible.builtin.copy:
    src: /mnt/storage/home/homelab/letsencrypt/{{ secret_domain }}/{{ item.src }}
    remote_src: true
    dest: /mnt/storage/jail-mounts/postgres/data{{ postgres_version }}/{{ item.dest }}
    # NOTE(review): 770 here is a numeric uid/gid (the in-jail postgres user),
    # not a mode — confirm against the jail's passwd database.
    owner: 770
    group: 770
    mode: 0600
  loop:
    - { src: "fullchain.pem", dest: "server.crt" }
    - { src: "key.pem", dest: "server.key" }
  notify: restart postgresql
  become: true

# In-jail postgresql.conf / pg_hba.conf edits, applied over SSH.
- block:
    - name: jail-postgres | disable full page writes because of ZFS
      ansible.builtin.lineinfile:
        path: /var/db/postgres/data{{ postgres_version }}/postgresql.conf
        regexp: '^full_page_writes\s*='
        line: "full_page_writes=off"
        state: present
      notify: restart postgresql

    - name: jail-postgres | listen to all addresses
      ansible.builtin.lineinfile:
        path: /var/db/postgres/data{{ postgres_version }}/postgresql.conf
        regexp: '^listen_addresses\s*='
        line: "listen_addresses = '*'"
        state: present
      notify: restart postgresql

    - name: jail-postgres | ssl configuration
      ansible.builtin.blockinfile:
        path: /var/db/postgres/data{{ postgres_version }}/postgresql.conf
        block: |
          ssl = on
          ssl_cert_file = 'server.crt'
          ssl_key_file = 'server.key'
          ssl_prefer_server_ciphers = on
        state: present
      notify: restart postgresql

    - name: jail-postgres | configure postgres
      ansible.builtin.template:
        src: postgres/pg_hba.conf
        dest: /var/db/postgres/data{{ postgres_version }}/pg_hba.conf
        owner: postgres
        group: postgres
      notify: restart postgresql
  delegate_to: "{{ postgres_jail_ip.stdout }}"
  remote_user: root

View File

@@ -0,0 +1,143 @@
---
# One-time provisioning of the postgres jail: datasets, packages, initdb, and
# relocation of base/ and pg_wal/ onto dedicated ZFS datasets.
- name: jail-postgres | get jail ip
  ansible.builtin.shell:
    cmd: iocage exec postgres ifconfig epair0b | grep 'inet' | awk -F ' ' '{ print $2 }'
  changed_when: false
  register: postgres_jail_ip
  become: true

# Host-side: datasets and the nullfs mount for the data directory.
- block:
    - name: jail-postgres | create zfs pools
      community.general.zfs:
        name: "{{ item }}"
        state: present
      loop:
        - "{{ pool_name }}/jail-mounts"
        - "{{ pool_name }}/jail-mounts/postgres"
        - "{{ pool_name }}/jail-mounts/postgres/data{{ postgres_version }}"

    # FIX: this task was present twice, verbatim; the duplicate is removed.
    - name: jail-postgres | configure zfs pool postgresql
      community.general.zfs:
        name: "{{ pool_name }}/jail-mounts/postgres"
        state: present
        extra_zfs_properties:
          # quoted so YAML doesn't coerce "off" into boolean false
          atime: "off"
          setuid: "off"

    - name: jail-postgres | create empty data{{ postgres_version }} dir
      ansible.builtin.shell:
        cmd: iocage exec postgres mkdir -p /var/db/postgres/data{{ postgres_version }}

    - name: jail-postgres | mount data{{ postgres_version }}
      ansible.builtin.shell:
        cmd: iocage fstab -a postgres /mnt/{{ pool_name }}/jail-mounts/postgres/data{{ postgres_version }} /var/db/postgres/data{{ postgres_version }} nullfs rw 0 0
  become: true

# In-jail: install postgres, initdb, then move base/ and pg_wal/ aside so they
# can be remounted from dedicated datasets.
- block:
    - name: jail-postgres | packages
      community.general.pkgng:
        name:
          - postgresql{{ postgres_version }}-server
          - postgresql{{ postgres_version }}-contrib
          - postgresql{{ postgres_version }}-client
          - py39-pip
        state: present

    - name: jail-postgres | pip packages
      ansible.builtin.pip:
        name: psycopg2
        state: present

    - name: jail-postgres | change postgres/data{{ postgres_version }} mod
      ansible.builtin.file:
        path: /var/db/postgres/data{{ postgres_version }}
        owner: postgres
        group: postgres

    - name: jail-postgres | initdb
      ansible.builtin.shell:
        cmd: su -m postgres -c 'initdb -E UTF-8 /var/db/postgres/data{{ postgres_version }}'

    - name: jail-postgres | move base and pg_wal
      ansible.builtin.shell:
        cmd: su -m postgres -c 'mv /var/db/postgres/data{{ postgres_version }}/{{ item }} /var/db/postgres/data{{ postgres_version }}/{{ item }}0'
      loop:
        - base
        - pg_wal

    - name: jail-postgres | create base and pg_wal empty dirs
      ansible.builtin.file:
        path: /var/db/postgres/data{{ postgres_version }}/{{ item }}
        state: directory
        owner: postgres
        group: postgres
      loop:
        - base
        - pg_wal
  delegate_to: "{{ postgres_jail_ip.stdout }}"
  remote_user: root

# Host-side again: dedicated datasets for base/ and pg_wal/ and their mounts.
- block:
    - name: jail-postgres | create missing zfs pools
      community.general.zfs:
        name: "{{ item }}"
        state: present
      loop:
        - "{{ pool_name }}/jail-mounts/postgres/data{{ postgres_version }}/base"
        - "{{ pool_name }}/jail-mounts/postgres/data{{ postgres_version }}/pg_wal"

    - name: jail-postgres | mount base
      ansible.builtin.shell:
        cmd: iocage fstab -a postgres /mnt/{{ pool_name }}/jail-mounts/postgres/data{{ postgres_version }}/{{ item }} /var/db/postgres/data{{ postgres_version }}/{{ item }} nullfs rw 0 0
      loop:
        - base
        - pg_wal
  become: true

# In-jail: restore the relocated content and bring postgresql up.
- block:
    - name: jail-postgres | move base and pg_wal content to mounts
      ansible.builtin.shell:
        cmd: mv /var/db/postgres/data{{ postgres_version }}/{{ item }}0/* /var/db/postgres/data{{ postgres_version }}/{{ item }}/; rmdir /var/db/postgres/data{{ postgres_version }}/{{ item }}0
      loop:
        - base
        - pg_wal

    - name: jail-postgres | change mod
      ansible.builtin.file:
        path: /var/db/postgres/data{{ postgres_version }}/{{ item }}
        state: directory
        owner: postgres
        group: postgres
        recurse: true
      loop:
        - base
        - pg_wal

    - name: jail-postgres | enable postgresql service
      community.general.sysrc:
        name: postgresql_enable
        state: present
        value: "YES"

    - name: jail-postgres | start postgresql service
      ansible.builtin.service:
        name: postgresql
        state: started

    - name: jail-postgres | change postgres password
      postgresql_query:
        login_user: postgres
        query: ALTER USER postgres PASSWORD '{{ postgres_password }}'
  delegate_to: "{{ postgres_jail_ip.stdout }}"
  remote_user: root

View File

@@ -0,0 +1,36 @@
---
# Role entry point: common setup on every NAS, jail management on the primary
# NAS only.
- ansible.builtin.include_tasks: directories.yml
- ansible.builtin.include_tasks: scripts.yml
- ansible.builtin.include_tasks: telegraf.yml

# NOTE(review): this condition appears to attach to the wireguard include only
# (wireguard is configured on the secondary NAS) — confirm original indentation.
- ansible.builtin.include_tasks: wireguard.yml
  when: "main_nas == false"

- block:
    - ansible.builtin.include_tasks: jails/main.yml
    # postgres jail provisioning is currently disabled:
    # - ansible.builtin.shell:
    #     cmd: test -f /mnt/storage/jail-mounts/postgres/data{{ postgres_version }}/postgresql.conf
    #   register: postgres_data_exists
    #   become: true
    #   changed_when: false
    #   failed_when: postgres_data_exists.rc != 0 and postgres_data_exists.rc != 1
    # - ansible.builtin.include_tasks: jails/postgres-init.yml
    #   when: postgres_data_exists.rc == 1
    # - ansible.builtin.include_tasks: jails/postgres-conf.yml

    # run borgserver init only when its SSH host keys don't exist yet
    - ansible.builtin.shell:
        cmd: test -f /mnt/storage/jail-mounts/borgserver/keys/host/ssh_host_ed25519_key
      register: borgserver_data_exists
      become: true
      changed_when: false
      failed_when: borgserver_data_exists.rc != 0 and borgserver_data_exists.rc != 1
    - ansible.builtin.include_tasks: jails/borgserver-init.yml
      when: borgserver_data_exists.rc == 1
  when: "main_nas"

View File

@@ -0,0 +1,25 @@
---
# Static helper scripts are copied verbatim; scripts embedding host variables
# are rendered as Jinja2 templates.
- name: scripts | copy scripts
  ansible.builtin.copy:
    src: "scripts/{{ item }}"
    dest: "{{ scripts_dir }}/{{ item }}"
    mode: 0755
  loop:
    - certificates_deploy.py
    - snapshots_clearempty.py
    - snapshots_prune.py
    - telegraf_hddtemp.bash

- name: scripts | template scripts
  ansible.builtin.template:
    src: "scripts/{{ item.name }}"
    dest: "{{ scripts_dir }}/{{ item.name }}"
    mode: "{{ item.mode }}"
  loop:
    - { name: "backupconfig_cloudsync_pre.bash", mode: "0775" }
    - { name: "certificates_deploy.bash", mode: "0775" }
    - { name: "certificates_deploy.conf", mode: "0664" }
    - { name: "snapshots_prune.sh", mode: "0775" }
    - { name: "report_pools.sh", mode: "0775" }
    - { name: "report_smart.sh", mode: "0775" }
    - { name: "report_ups.sh", mode: "0775" }

View File

@@ -0,0 +1,12 @@
---
# Install samuelkadolph's truenas-telegraf wrapper and render its config.
- name: telegraf | clone git repository
  ansible.builtin.git:
    repo: https://github.com/samuelkadolph/truenas-telegraf
    dest: "{{ telegraf_dir }}"
    version: main

- name: telegraf | copy configuration
  ansible.builtin.template:
    src: telegraf/telegraf.conf
    dest: "{{ telegraf_dir }}/telegraf.conf"
    mode: 0775

View File

@@ -0,0 +1,17 @@
---
# Render the per-host wireguard config and the IP-check helper script.
- name: wireguard | configuration
  ansible.builtin.template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    mode: "{{ item.mode }}"
  loop:
    - {
        src: "wireguard/{{ ansible_facts['nodename'] }}.conf",
        dest: "{{ homelab_homedir }}/{{ wg_interface }}.conf",
        # FIX: bare 400/700 are parsed as *decimal* integers by Ansible
        # (400 == 0o620), yielding the wrong permissions; octal must be a
        # quoted string (or 0o-prefixed).
        mode: "0400",
      }
    - {
        src: "wireguard/ip-check.bash",
        dest: "{{ homelab_homedir }}/wireguard-ip-check.bash",
        mode: "0700",
      }

View File

@@ -0,0 +1,98 @@
# PostgreSQL Client Authentication Configuration File
# ===================================================
#
# Refer to the "Client Authentication" section in the PostgreSQL
# documentation for a complete description of this file. A short
# synopsis follows.
#
# This file controls: which hosts are allowed to connect, how clients
# are authenticated, which PostgreSQL user names they can use, which
# databases they can access. Records take one of these forms:
#
# local DATABASE USER METHOD [OPTIONS]
# host DATABASE USER ADDRESS METHOD [OPTIONS]
# hostssl DATABASE USER ADDRESS METHOD [OPTIONS]
# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS]
# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS]
# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS]
#
# (The uppercase items must be replaced by actual values.)
#
# The first field is the connection type:
# - "local" is a Unix-domain socket
# - "host" is a TCP/IP socket (encrypted or not)
# - "hostssl" is a TCP/IP socket that is SSL-encrypted
# - "hostnossl" is a TCP/IP socket that is not SSL-encrypted
# - "hostgssenc" is a TCP/IP socket that is GSSAPI-encrypted
# - "hostnogssenc" is a TCP/IP socket that is not GSSAPI-encrypted
#
# DATABASE can be "all", "sameuser", "samerole", "replication", a
# database name, or a comma-separated list thereof. The "all"
# keyword does not match "replication". Access to replication
# must be enabled in a separate record (see example below).
#
# USER can be "all", a user name, a group name prefixed with "+", or a
# comma-separated list thereof. In both the DATABASE and USER fields
# you can also write a file name prefixed with "@" to include names
# from a separate file.
#
# ADDRESS specifies the set of hosts the record matches. It can be a
# host name, or it is made up of an IP address and a CIDR mask that is
# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that
# specifies the number of significant bits in the mask. A host name
# that starts with a dot (.) matches a suffix of the actual host name.
# Alternatively, you can write an IP address and netmask in separate
# columns to specify the set of hosts. Instead of a CIDR-address, you
# can write "samehost" to match any of the server's own IP addresses,
# or "samenet" to match any address in any subnet that the server is
# directly connected to.
#
# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256",
# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert".
# Note that "password" sends passwords in clear text; "md5" or
# "scram-sha-256" are preferred since they send encrypted passwords.
#
# OPTIONS are a set of options for the authentication in the format
# NAME=VALUE. The available options depend on the different
# authentication methods -- refer to the "Client Authentication"
# section in the documentation for a list of which options are
# available for which authentication methods.
#
# Database and user names containing spaces, commas, quotes and other
# special characters must be quoted. Quoting one of the keywords
# "all", "sameuser", "samerole" or "replication" makes the name lose
# its special character, and just match a database or username with
# that name.
#
# This file is read on server startup and when the server receives a
# SIGHUP signal. If you edit the file on a running system, you have to
# SIGHUP the server for the changes to take effect, run "pg_ctl reload",
# or execute "SELECT pg_reload_conf()".
#
# Put your actual configuration here
# ----------------------------------
#
# If you want to allow non-local connections, you need to add more
# "host" records. In that case you will also need to make PostgreSQL
# listen on a non-local interface via the listen_addresses
# configuration parameter, or via the -i or -h command line switches.
# CAUTION: Configuring the system for local "trust" authentication
# allows any local user to connect as any PostgreSQL user, including
# the database superuser. If you do not trust all your local users,
# use another authentication method.
# TYPE DATABASE USER ADDRESS METHOD
# "local" is for Unix domain socket connections only
local all all trust
# IPv4 local connections:
hostssl all all 0.0.0.0/0 scram-sha-256
# CAUTION: "trust" over an unencrypted connection from ANY address lets anyone
# who can reach this port log in as joplin; restrict the ADDRESS column to the
# application's subnet if at all possible.
hostnossl joplin joplin 0.0.0.0/0 trust
# IPv6 local connections:
# Allow replication connections from localhost, by a user with the
# replication privilege.
local replication all trust
host replication all 127.0.0.1/32 trust
host replication all ::1/128 trust

View File

@@ -0,0 +1,17 @@
#!/bin/bash
# DEBUG
# set -x

# Configuration backup Cloud Sync pre-script:
# copies the live TrueNAS config database into the backup folder, fixes
# ownership, and prunes copies older than 90 days.
# Rendered by Ansible (Jinja2 template).

# Variables
DATE=$(date +%Y%m%d)
BACKUP_FOLDER="{{ backups_dir }}servers/{{ ansible_facts['nodename'] }}"

# /data/freenas-v1.db is the TrueNAS configuration database
cp -p /data/freenas-v1.db ${BACKUP_FOLDER}/${DATE}.db
chmod -R 775 ${BACKUP_FOLDER}/${DATE}.db
chown -R homelab:homelab ${BACKUP_FOLDER}/${DATE}.db

# Keep the last 90 backups on disk
# NOTE(review): -mtime +90 prunes by age in days, not by count of files
find ${BACKUP_FOLDER}/*.db -mtime +90 -type f -delete

View File

@@ -0,0 +1,33 @@
#!/bin/bash
# DEBUG
# set -x

# Get certificates from remote server and deploy them: to the TrueNAS UI
# (and minio) via certificates_deploy.py, and — on the main NAS — into the
# postgresql jail's data directory.
# Rendered by Ansible (Jinja2 template).

# Variables
SCRIPT_PATH="{{ scripts_dir }}"
CERTIFICATE_PATH="{{ certificates_dir }}"
CONFIG_FILE="${SCRIPT_PATH}/certificates_deploy.conf"
{% if main_nas == true %}POSTGRES_DIR="/mnt/storage/jail-mounts/postgres/data{{ postgres_version }}/"{% endif %}

# Check if cert has been uploaded last week
result=$(find ${CERTIFICATE_PATH}/cert.pem -mtime -7)
if [[ "$result" == "${CERTIFICATE_PATH}/cert.pem" ]]; then
    # Deploy certificate (truenas UI & minio)
    python ${SCRIPT_PATH}/certificates_deploy.py -c ${CONFIG_FILE}
    test $? -ne 0 && FLAG_NOTIF=true
{% if main_nas == true %}
    # Deploy certificate (postgresql jail)
    umask 0177
    cp ${CERTIFICATE_PATH}/fullchain.pem ${POSTGRES_DIR}/server.crt
    cp ${CERTIFICATE_PATH}/key.pem ${POSTGRES_DIR}/server.key
    chown 770:770 ${POSTGRES_DIR}/server.crt ${POSTGRES_DIR}/server.key
    chmod 600 ${POSTGRES_DIR}/server.crt ${POSTGRES_DIR}/server.key
    # restart postgresql inside the jail
    # FIX: "iocage postgres service ..." is not a valid iocage invocation;
    # commands must run inside the jail via "iocage exec <jail> <cmd>".
    iocage exec postgres service postgresql restart
{% endif %}
fi

View File

@@ -0,0 +1,48 @@
# Configuration file for deploy_certificates.py
[deploy]
# Choose one of the following authentication methods, "api_key" or "password" (comment out the other one).
# Auth via API keys is highly recommended, but is only available from TrueNAS (Core) 12.0 up.
# You can generate a new API key in the web interface under "Settings" (upper right) > "API Keys".
api_key = {{ root_api_key }}
# If you are on FreeNAS 11 or lower, set this to your FreeNAS root password
# password =
# Everything below here is optional
# cert_fqdn specifies the FQDN used for your certificate. Default is your system hostname
# cert_fqdn = foo.bar.baz
# connect_host specifies the hostname the script should attempt to connect to, to deploy the cert.
# Default is localhost (assuming the script is running on your FreeNAS box)
# connect_host = baz.bar.foo
# verify sets whether the script will attempt to verify the server's certificate with a HTTPS
# connection. Set to true if you're using a HTTPS connection to a remote host. If connect_host
# is set to localhost (or is unset), set to false. Default is false.
# verify = false
# privkey_path is the path to the certificate private key on your system. Default
# assumes you're using acme.sh:
# /root/.acme.sh/cert_fqdn/cert_fqdn.key
privkey_path = {{ certificates_dir }}/key.pem
# fullchain_path is the path to the full chain (leaf cert + intermediate certs)
# on your system. Default assumes you're using acme.sh:
# /root/.acme.sh/cert_fqdn/fullchain.cer
fullchain_path = {{ certificates_dir }}/fullchain.pem
# protocol sets the connection protocol, http or https. Include '://' at the end.
# Default is http
# protocol = https://
# port sets the port to use to connect. Default is 80. If protocol is https,
# this MUST be set to your https port.
# port = 443
# set ftp_enabled to true if you have the FTP service enabled on your FreeNAS. Default is false.
# ftp_enabled = true
{% if service_s3 is defined %}
s3_enabled = true
{% endif %}

View File

@@ -0,0 +1,162 @@
#!/bin/sh
# zpool_report.sh -- mail an HTML-formatted ZFS pool status report.
# Builds a per-pool summary table (errors, capacity, fragmentation, scrub
# results) followed by the full `zpool status -v` output for each pool,
# then sends the result via sendmail.
### Parameters ###
# Specify your email address here:
email="truenas@{{ secret_email_domain }}"
# zpool output changed from FreeNAS version 11.0 to 11.1, breaking
# our parsing of the scrubErrors and scrubDate variables. Added a
# conditional to test for the FreeNAS version and parse accordingly.
# This changed again with the release of TrueNAS. Ironically, back to
# the way parsing worked with older versions of FreeNAS.
#
# We obtain the FreeBSD version using uname, as suggested by user
# Chris Moore on the FreeBSD forum.
#
# 'uname -K' gives 7-digit OS release and version, e.g.:
#
# FreeBSD 11.0 1100512
# FreeBSD 11.1 1101505
# FreeBSD 12.2 1202000
fbsd_relver=$(uname -K)
freenashost=$(hostname -s | tr '[:lower:]' '[:upper:]')
boundary="===== MIME boundary; FreeNAS server ${freenashost} ====="
logfile="/tmp/zpool_report.tmp"
subject="ZPool Status Report for ${freenashost}"
pools=$(zpool list -H -o name)
# Thresholds: pool capacity (%) for warning/critical, and max days since
# the last completed scrub before warning.
usedWarn=75
usedCrit=90
scrubAgeWarn=30
warnSymbol="?"
critSymbol="!"
### Set email headers ###
# Truncate (>) instead of appending: if a previous run aborted after writing
# the logfile (e.g. no email configured), appending would duplicate the MIME
# header and corrupt the next report. The SMART report script already
# truncates; this makes both scripts consistent.
printf "%s\n" "To: ${email}
Subject: ${subject}
Mime-Version: 1.0
Content-Type: multipart/mixed; boundary=\"$boundary\"
--${boundary}
Content-Type: text/html; charset=\"US-ASCII\"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline
<html><head></head><body><pre style=\"font-size:14px; white-space:pre\">" > ${logfile}
###### summary ######
(
  echo "########## ZPool status report summary for all pools on server ${freenashost} ##########"
  echo ""
  echo "+--------------+--------+------+------+------+----+----+--------+------+-----+"
  echo "|Pool Name     |Status  |Read  |Write |Cksum |Used|Frag|Scrub   |Scrub |Last |"
  echo "|              |        |Errors|Errors|Errors|    |    |Repaired|Errors|Scrub|"
  echo "|              |        |      |      |      |    |    |Bytes   |      |Age  |"
  echo "+--------------+--------+------+------+------+----+----+--------+------+-----+"
) >> ${logfile}
for pool in $pools; do
  # FRAG is not reported for boot pools on FreeNAS < 11.1.
  if [ "$fbsd_relver" -ge 1101000 ]; then
    frag="$(zpool list -H -o frag "$pool")"
  else
    if [ "${pool}" = "freenas-boot" ] || [ "${pool}" = "boot-pool" ]; then
      frag=""
    else
      frag="$(zpool list -H -o frag "$pool")"
    fi
  fi
  status="$(zpool list -H -o health "$pool")"
  # Per-vdev error columns from `zpool status`; summed below. Any
  # non-numeric value (e.g. dashes) caps the counter at ">1K".
  errors="$(zpool status "$pool" | grep -E "(ONLINE|DEGRADED|FAULTED|UNAVAIL|REMOVED)[ \t]+[0-9]+")"
  readErrors=0
  for err in $(echo "$errors" | awk '{print $3}'); do
    if echo "$err" | grep -E -q "[^0-9]+"; then
      readErrors=1000
      break
    fi
    readErrors=$((readErrors + err))
  done
  writeErrors=0
  for err in $(echo "$errors" | awk '{print $4}'); do
    if echo "$err" | grep -E -q "[^0-9]+"; then
      writeErrors=1000
      break
    fi
    writeErrors=$((writeErrors + err))
  done
  cksumErrors=0
  for err in $(echo "$errors" | awk '{print $5}'); do
    if echo "$err" | grep -E -q "[^0-9]+"; then
      cksumErrors=1000
      break
    fi
    cksumErrors=$((cksumErrors + err))
  done
  if [ "$readErrors" -gt 999 ]; then readErrors=">1K"; fi
  if [ "$writeErrors" -gt 999 ]; then writeErrors=">1K"; fi
  if [ "$cksumErrors" -gt 999 ]; then cksumErrors=">1K"; fi
  used="$(zpool list -H -p -o capacity "$pool")"
  scrubRepBytes="N/A"
  scrubErrors="N/A"
  scrubAge="N/A"
  # The "scan:" line field positions differ between FreeNAS 11.1-11.x and
  # everything else (see version note above), hence the two awk layouts.
  if [ "$(zpool status "$pool" | grep "scan" | awk '{print $2}')" = "scrub" ]; then
    scrubRepBytes="$(zpool status "$pool" | grep "scan" | awk '{print $4}')"
    if [ "$fbsd_relver" -gt 1101000 ] && [ "$fbsd_relver" -lt 1200000 ]; then
      scrubErrors="$(zpool status "$pool" | grep "scan" | awk '{print $10}')"
      scrubDate="$(zpool status "$pool" | grep "scan" | awk '{print $17"-"$14"-"$15"_"$16}')"
    else
      scrubErrors="$(zpool status "$pool" | grep "scan" | awk '{print $8}')"
      scrubDate="$(zpool status "$pool" | grep "scan" | awk '{print $15"-"$12"-"$13"_"$14}')"
    fi
    # BSD date(1): -j = don't set clock, -f = parse scrubDate with this format.
    scrubTS="$(date -j -f "%Y-%b-%e_%H:%M:%S" "$scrubDate" "+%s")"
    currentTS="$(date "+%s")"
    # Age in days, rounded to nearest (+43200 = half a day in seconds).
    scrubAge=$((((currentTS - scrubTS) + 43200) / 86400))
  fi
  # Severity: critical beats warning; otherwise a single space.
  if [ "$status" = "FAULTED" ] || [ "$used" -gt "$usedCrit" ]; then
    symbol="$critSymbol"
  elif [ "$scrubErrors" != "N/A" ] && [ "$scrubErrors" != "0" ]; then
    symbol="$critSymbol"
  elif [ "$status" != "ONLINE" ] \
    || [ "$readErrors" != "0" ] \
    || [ "$writeErrors" != "0" ] \
    || [ "$cksumErrors" != "0" ] \
    || [ "$used" -gt "$usedWarn" ] \
    || [ "$(echo "$scrubAge" | awk '{print int($1)}')" -gt "$scrubAgeWarn" ]; then
    symbol="$warnSymbol"
  elif [ "$scrubRepBytes" != "0" ] && [ "$scrubRepBytes" != "0B" ] && [ "$scrubRepBytes" != "N/A" ]; then
    symbol="$warnSymbol"
  else
    symbol=" "
  fi
  (
    printf "|%-12s %1s|%-8s|%6s|%6s|%6s|%3s%%|%4s|%8s|%6s|%5s|\n" \
      "$pool" "$symbol" "$status" "$readErrors" "$writeErrors" "$cksumErrors" \
      "$used" "$frag" "$scrubRepBytes" "$scrubErrors" "$scrubAge"
  ) >> ${logfile}
done
(
  echo "+--------------+--------+------+------+------+----+----+--------+------+-----+"
) >> ${logfile}
###### for each pool ######
for pool in $pools; do
  (
    echo ""
    echo "########## ZPool status report for ${pool} ##########"
    echo ""
    zpool status -v "$pool"
  ) >> ${logfile}
done
printf "%s\n" "</pre></body></html>
--${boundary}--" >> ${logfile}
### Send report ###
if [ -z "${email}" ]; then
  echo "No email address specified, information available in ${logfile}"
else
  sendmail -t -oi < ${logfile}
  rm ${logfile}
fi

View File

@@ -0,0 +1,267 @@
#!/bin/sh
# smart_report.sh -- mail an HTML-formatted SMART status report.
# Discovers SMART-enabled drives, classifies them as SATA or SAS, builds a
# summary table per class plus per-drive attribute/self-test detail, and
# mails the result via sendmail.
### Parameters ###
# Specify your email address here:
email="truenas@{{ secret_email_domain }}"
# Full path to 'smartctl' program:
smartctl=/usr/local/sbin/smartctl
freenashost=$(hostname -s | tr '[:lower:]' '[:upper:]')
boundary="===== MIME boundary; FreeNAS server ${freenashost} ====="
# Use an absolute path: the previous relative "smart_report.tmp" landed in
# whatever working directory cron happened to use. /tmp matches the sibling
# zpool/UPS report scripts.
logfile="/tmp/smart_report.tmp"
subject="SMART Status Report for ${freenashost}"
# Thresholds: drive temperature (C), reallocated/pending/uncorrectable
# sector count, and days since the last self-test.
tempWarn=40
tempCrit=45
sectorsCrit=10
testAgeWarn=1
warnSymbol="?"
critSymbol="!"
Drive_count=0
SATA_count=0
SAS_count=0
Drive_list=""
SATA_list=""
SAS_list=""
# Get list of SMART-enabled drives
get_smart_drives()
{
  gs_drives=$("$smartctl" --scan | awk '{print $1}')
  for gs_drive in $gs_drives; do
    gs_smart_flag=$("$smartctl" -i "$gs_drive" | grep -E "SMART support is:[[:blank:]]+Enabled" | awk '{print $4}')
    if [ "$gs_smart_flag" = "Enabled" ]; then
      Drive_list="$Drive_list $gs_drive"
      Drive_count=$((Drive_count + 1))
    fi
  done
}
# Get list of SATA disks, including older drives that only report an ATA version
get_sata_drives()
{
  for drive in $Drive_list; do
    lFound=0
    gsata_smart_flag=$("$smartctl" -i "$drive" | grep -E "SATA Version is:[[:blank:]]" | awk '{print $4}')
    if [ "$gsata_smart_flag" = "SATA" ]; then
      lFound=$((lFound + 1))
    else
      gsata_smart_flag=$("$smartctl" -i "$drive" | grep -E "ATA Version is:[[:blank:]]" | awk '{print $1}')
      if [ "$gsata_smart_flag" = "ATA" ]; then
        lFound=$((lFound + 1))
      fi
    fi
    if [ $lFound -gt 0 ]; then
      SATA_list="$SATA_list $drive"
      SATA_count=$((SATA_count + 1))
    fi
  done
}
# Get list of SAS disks
get_sas_drives()
{
  for drive in $Drive_list; do
    gsas_smart_flag=$("$smartctl" -i "$drive" | grep -E "Transport protocol:[[:blank:]]+SAS" | awk '{print $3}')
    if [ "$gsas_smart_flag" = "SAS" ]; then
      SAS_list="$SAS_list $drive"
      SAS_count=$((SAS_count + 1))
    fi
  done
}
### Fetch drive lists ###
get_smart_drives
get_sata_drives
get_sas_drives
### Set email headers ###
# '>' truncates so a leftover logfile from an earlier failed run is discarded.
printf "%s\n" "To: ${email}
Subject: ${subject}
Mime-Version: 1.0
Content-Type: multipart/mixed; boundary=\"$boundary\"
--${boundary}
Content-Type: text/html; charset=\"US-ASCII\"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline
<html><head></head><body><pre style=\"font-size:14px; white-space:pre\">" > ${logfile}
if [ $Drive_count -eq 0 ]; then
  echo "##### No SMART-enabled disks found on this system #####" >> "$logfile"
fi
###### Summary for SATA drives ######
if [ $SATA_count -gt 0 ]; then
  (
    echo "########## SMART status report summary for all SATA drives on server ${freenashost} ##########"
    echo ""
    echo "+------+------------------------+----+------+-----+-----+-------+-------+--------+------+----------+------+-----------+----+"
    echo "|Device|Serial                  |Temp| Power|Start|Spin |ReAlloc|Current|Offline |Seek  |Total     |High  |    Command|Last|"
    echo "|      |Number                  |    |    On|Stop |Retry|Sectors|Pending|Uncorrec|Errors|Seeks     |Fly   |    Timeout|Test|"
    echo "|      |                        |    | Hours|Count|Count|       |Sectors|Sectors |      |          |Writes|      Count|Age |"
    echo "+------+------------------------+----+------+-----+-----+-------+-------+--------+------+----------+------+-----------+----+"
  ) >> "$logfile"
  ###### Detail information for each SATA drive ######
  for drive in $SATA_list; do
    (
      devid=$(basename "$drive")
      lastTestHours=$("$smartctl" -l selftest "$drive" | grep "# 1" | awk '{print $9}')
      # -v 7,hex48 reformats attribute 7 (Seek_Error_Rate) as raw hex so the
      # awk script below can split it into error and total-seek counts.
      "$smartctl" -A -i -v 7,hex48 "$drive" | \
      awk -v device="$devid" -v tempWarn="$tempWarn" -v tempCrit="$tempCrit" -v sectorsCrit="$sectorsCrit" \
        -v testAgeWarn="$testAgeWarn" -v warnSymbol="$warnSymbol" -v critSymbol="$critSymbol" \
        -v lastTestHours="$lastTestHours" '
      /Serial Number:/{serial=$3}
      /190 Airflow_Temperature/{temp=$10}
      /194 Temperature/{temp=$10}
      /Power_On_Hours/{split($10,a,"+");sub(/h/,"",a[1]);onHours=a[1];}
      /Start_Stop_Count/{startStop=$10}
      /Spin_Retry_Count/{spinRetry=$10}
      /Reallocated_Sector/{reAlloc=$10}
      /Current_Pending_Sector/{pending=$10}
      /Offline_Uncorrectable/{offlineUnc=$10}
      /Seek_Error_Rate/{seekErrors=("0x" substr($10,3,4));totalSeeks=("0x" substr($10,7))}
      /High_Fly_Writes/{hiFlyWr=$10}
      /Command_Timeout/{cmdTimeout=$10}
      END {
        testAge=sprintf("%.0f", (onHours - lastTestHours) / 24);
        if (temp > tempCrit || reAlloc > sectorsCrit || pending > sectorsCrit || offlineUnc > sectorsCrit)
          device=device " " critSymbol;
        else if (temp > tempWarn || reAlloc > 0 || pending > 0 || offlineUnc > 0 || testAge > testAgeWarn)
          device=device " " warnSymbol;
        seekErrors=sprintf("%d", seekErrors);
        totalSeeks=sprintf("%d", totalSeeks);
        if (totalSeeks == "0") {
          seekErrors="N/A";
          totalSeeks="N/A";
        }
        if (temp > tempWarn || temp > tempCrit) temp=temp"*"
        if (reAlloc > 0 || reAlloc > sectorsCrit) reAlloc=reAlloc"*"
        if (pending > 0 || pending > sectorsCrit) pending=pending"*"
        if (offlineUnc > 0 || offlineUnc > sectorsCrit) offlineUnc=offlineUnc"*"
        if (testAge > testAgeWarn) testAge=testAge"*"
        if (hiFlyWr == "") hiFlyWr="N/A";
        if (cmdTimeout == "") cmdTimeout="N/A";
        printf "|%-6s|%-24s|%-4s|%6s|%5s|%5s|%7s|%7s|%8s|%6s|%10s|%6s|%11s|%4s|\n",
        device, serial, temp, onHours, startStop, spinRetry, reAlloc, pending, offlineUnc,
        seekErrors, totalSeeks, hiFlyWr, cmdTimeout, testAge;
      }'
    ) >> "$logfile"
  done
  (
    echo "+------+------------------------+----+------+-----+-----+-------+-------+--------+------+----------+------+-----------+----+"
  ) >> "$logfile"
fi
###### Summary for SAS drives ######
if [ $SAS_count -gt 0 ]; then
  (
    if [ $SATA_count -gt 0 ]; then
      echo ""
    fi
    echo "########## SMART status report summary for all SAS drives on server ${freenashost} ##########"
    echo ""
    echo "+------+------------------------+----+-----+------+------+------+------+------+------+"
    echo "|Device|Serial                  |Temp|Start|Load  |Defect|Uncorr|Uncorr|Uncorr|Non   |"
    echo "|      |Number                  |    |Stop |Unload|List  |Read  |Write |Verify|Medium|"
    echo "|      |                        |    |Count|Count |Elems |Errors|Errors|Errors|Errors|"
    echo "+------+------------------------+----+-----+------+------+------+------+------+------+"
  ) >> "$logfile"
  ###### Detail information for each SAS drive ######
  for drive in $SAS_list; do
    (
      devid=$(basename "$drive")
      "$smartctl" -a "$drive" | \
      awk -v device="$devid" -v tempWarn="$tempWarn" -v tempCrit="$tempCrit" \
        -v warnSymbol="$warnSymbol" -v critSymbol="$critSymbol" '\
      /Serial number:/{serial=$3}
      /Current Drive Temperature:/{temp=$4} \
      /start-stop cycles:/{startStop=$4} \
      /load-unload cycles:/{loadUnload=$4} \
      /grown defect list:/{defectList=$6} \
      /read:/{readErrors=$8} \
      /write:/{writeErrors=$8} \
      /verify:/{verifyErrors=$8} \
      /Non-medium error count:/{nonMediumErrors=$4} \
      END {
        if (temp > tempCrit)
          device=device " " critSymbol;
        else if (temp > tempWarn)
          device=device " " warnSymbol;
        printf "|%-6s|%-24s| %3s|%5s|%6s|%6s|%6s|%6s|%6s|%6s|\n",
        device, serial, temp, startStop, loadUnload, defectList, \
        readErrors, writeErrors, verifyErrors, nonMediumErrors;
      }'
    ) >> "$logfile"
  done
  (
    echo "+------+------------------------+----+-----+------+------+------+------+------+------+"
  ) >> "$logfile"
fi
if [ $SATA_count -gt 0 ] || [ $SAS_count -gt 0 ]; then
  ###### Emit SATA drive information ######
  for drive in $SATA_list; do
    vendor=$("$smartctl" -i "$drive" | grep "Vendor:" | awk '{print $NF}')
    if [ -z "$vendor" ]; then
      dfamily=$("$smartctl" -i "$drive" | grep "Model Family" | awk '{print $3, $4, $5, $6, $7}' | sed -e 's/[[:space:]]*$//')
      dmodel=$("$smartctl" -i "$drive" | grep "Device Model" | awk '{print $3, $4, $5, $6, $7}' | sed -e 's/[[:space:]]*$//')
      if [ -z "$dfamily" ]; then
        dinfo=$dmodel
      else
        dinfo="$dfamily ($dmodel)"
      fi
    else
      product=$("$smartctl" -i "$drive" | grep "Product:" | awk '{print $NF}')
      revision=$("$smartctl" -i "$drive" | grep "Revision:" | awk '{print $NF}')
      dinfo="$vendor $product $revision"
    fi
    serial=$("$smartctl" -i "$drive" | grep "Serial Number" | awk '{print $3}')
    (
      echo ""
      echo "########## SATA drive $drive Serial: $serial"
      echo "########## ${dinfo}"
      "$smartctl" -n never -H -A -l error "$drive"
      "$smartctl" -n never -l selftest "$drive" | grep "# 1 \\|Num" | cut -c6-
    ) >> "$logfile"
  done
  ###### Emit SAS drive information ######
  for drive in $SAS_list; do
    devid=$(basename "$drive")
    brand=$("$smartctl" -i "$drive" | grep "Product" | sed "s/^.* //")
    serial=$("$smartctl" -i "$drive" | grep "Serial number" | sed "s/^.* //")
    (
      echo ""
      echo "########## SMART status for SAS drive $drive $serial (${brand}) ##########"
      "$smartctl" -n never -H -A -l error "$drive"
      "$smartctl" -n never -l selftest "$drive" | grep "# 1 \\|Num" | cut -c6-
    ) >> "$logfile"
  done
fi
# Strip smartctl banner/boilerplate lines from the report body.
# (BSD sed: -i '' = in-place with no backup file.)
sed -i '' -e '/smartctl 7.*/d' "$logfile"
sed -i '' -e '/smartctl 6.*/d' "$logfile"
sed -i '' -e '/smartctl 5.*/d' "$logfile"
sed -i '' -e '/smartctl 4.*/d' "$logfile"
sed -i '' -e '/Copyright/d' "$logfile"
sed -i '' -e '/=== START OF READ/d' "$logfile"
sed -i '' -e '/SMART Attributes Data/d' "$logfile"
sed -i '' -e '/Vendor Specific SMART/d' "$logfile"
sed -i '' -e '/SMART Error Log Version/d' "$logfile"
printf "%s\n" "</pre></body></html>
--${boundary}--" >> "$logfile"
### Send report ###
if [ -z "${email}" ]; then
  echo "No email address specified, information available in ${logfile}"
else
  sendmail -t -oi < "$logfile"
  rm "$logfile"
fi

View File

@@ -0,0 +1,92 @@
#!/bin/sh
# Send UPS report to designated email address
# Reference: http://networkupstools.org/docs/developer-guide.chunked/apas01.html
### Parameters ###
# Specify your email address here:
email="truenas@{{ secret_email_domain }}"
# Set to a value greater than zero to include all available UPSC
# variables in the report:
senddetail=0
freenashost=$(hostname -s)
freenashostuc=$(hostname -s | tr '[:lower:]' '[:upper:]')
boundary="===== MIME boundary; FreeNAS server ${freenashost} ====="
logfile="/tmp/ups_report.tmp"
subject="UPS Status Report for ${freenashostuc}"
### Set email headers ###
# Truncate (>) instead of appending: a leftover logfile from a previous
# failed run would otherwise be re-mailed with a duplicated MIME header.
printf "%s\n" "To: ${email}
Subject: ${subject}
Mime-Version: 1.0
Content-Type: multipart/mixed; boundary=\"$boundary\"
--${boundary}
Content-Type: text/html; charset=\"US-ASCII\"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline
<html><head></head><body><pre style=\"font-size:14px; white-space:pre\">" > ${logfile}
# Get a list of all ups devices installed on the system:
upslist=$(upsc -l "${freenashost}")
### Set email body ###
(
  date "+Time: %Y-%m-%d %H:%M:%S"
  echo ""
  for ups in $upslist; do
    # Each value is queried individually; missing variables yield an empty
    # string (stderr suppressed) and the optional lines below are skipped.
    ups_type=$(upsc "${ups}" device.type 2> /dev/null | tr '[:lower:]' '[:upper:]')
    ups_mfr=$(upsc "${ups}" ups.mfr 2> /dev/null)
    ups_model=$(upsc "${ups}" ups.model 2> /dev/null)
    ups_serial=$(upsc "${ups}" ups.serial 2> /dev/null)
    ups_status=$(upsc "${ups}" ups.status 2> /dev/null)
    ups_load=$(upsc "${ups}" ups.load 2> /dev/null)
    ups_realpower=$(upsc "${ups}" ups.realpower 2> /dev/null)
    ups_realpowernominal=$(upsc "${ups}" ups.realpower.nominal 2> /dev/null)
    ups_batterycharge=$(upsc "${ups}" battery.charge 2> /dev/null)
    ups_batteryruntime=$(upsc "${ups}" battery.runtime 2> /dev/null)
    ups_batteryvoltage=$(upsc "${ups}" battery.voltage 2> /dev/null)
    ups_inputvoltage=$(upsc "${ups}" input.voltage 2> /dev/null)
    ups_outputvoltage=$(upsc "${ups}" output.voltage 2> /dev/null)
    # The closing "===" belongs in the format string, not smuggled into the
    # last argument; output is unchanged.
    printf "=== %s %s, model %s, serial number %s ===\n\n" "${ups_mfr}" "${ups_type}" "${ups_model}" "${ups_serial}"
    echo "Name: ${ups}"
    echo "Status: ${ups_status}"
    echo "Output Load: ${ups_load}%"
    if [ -n "${ups_realpower}" ]; then
      echo "Real Power: ${ups_realpower}W"
    fi
    if [ -n "${ups_realpowernominal}" ]; then
      echo "Real Power: ${ups_realpowernominal}W (nominal)"
    fi
    if [ -n "${ups_inputvoltage}" ]; then
      echo "Input Voltage: ${ups_inputvoltage}V"
    fi
    if [ -n "${ups_outputvoltage}" ]; then
      echo "Output Voltage: ${ups_outputvoltage}V"
    fi
    echo "Battery Runtime: ${ups_batteryruntime}s"
    echo "Battery Charge: ${ups_batterycharge}%"
    echo "Battery Voltage: ${ups_batteryvoltage}V"
    echo ""
    if [ $senddetail -gt 0 ]; then
      echo "=== ALL AVAILABLE UPS VARIABLES ==="
      upsc "${ups}"
      echo ""
    fi
  done
) >> ${logfile}
printf "%s\n" "</pre></body></html>
--${boundary}--" >> ${logfile}
### Send report ###
if [ -z "${email}" ]; then
  echo "No email address specified, information available in ${logfile}"
else
  sendmail -t -oi < ${logfile}
  rm ${logfile}
fi

View File

@@ -0,0 +1,17 @@
#!/bin/sh
# Prune old ZFS snapshots on the configured pool, then delete empty ones.
# Rendered by Ansible (Jinja2) -- {{ ... }} values are filled at deploy time.
# DEBUG
# set -x
# Variables
SCRIPT_PATH="{{ scripts_dir }}"
INTERVAL="{{ snapshots_interval }}"
POOL_NAME="{{ pool_name }}"
# Prune
${SCRIPT_PATH}/snapshots_prune.py --recursive --intervals ${INTERVAL} ${POOL_NAME}
${SCRIPT_PATH}/snapshots_clearempty.py --recursive ${POOL_NAME}
{# Jinja2 fix: "{{ ... }}" inside a quoted string in an {% if %} tag is NOT
   expanded, so the previous comparison against the literal text
   "truenas.{{ secret_domain }}" could never match; use `~` concatenation. #}
{% if ansible_facts['nodename'] == "truenas." ~ secret_domain %}
${SCRIPT_PATH}/snapshots_prune.py --recursive --intervals daily:14 storage/video
{% endif %}

View File

@@ -0,0 +1,49 @@
# Telegraf configuration (Ansible/Jinja2 template).
# Collects host metrics and exposes them on :9273 for Prometheus to scrape.
[agent]
interval = "20s"
round_interval = true
metric_batch_size = 1000
metric_buffer_limit = 10000
collection_jitter = "0s"
flush_interval = "30s"
flush_jitter = "0s"
precision = ""
debug = false
quiet = false
hostname = "{{ ansible_facts['nodename'] }}"
omit_hostname = false
# Prometheus scrape endpoint; metrics expire after 60m without updates.
[[outputs.prometheus_client]]
listen = ":9273"
metric_version = 2
path = "/metrics"
string_as_label = true
expiration_interval = "60m"
[[inputs.cpu]]
percpu = true
totalcpu = true
[[inputs.diskio]]
# CPU temperature via an external helper emitting influx line protocol.
[[inputs.exec]]
commands = ["{{ telegraf_dir }}/cputemp"]
data_format = "influx"
# Per-disk temperatures; helper emits JSON keyed by the "disk" tag.
[[inputs.exec]]
commands = ["{{ scripts_dir }}/telegraf_hddtemp.bash"]
name_override = "disktemp"
timeout = "5s"
data_format = "json"
tag_keys = ["disk"]
[[inputs.mem]]
[[inputs.net]]
interfaces = ["em0", "igb0"]
[[inputs.system]]
[[inputs.netstat]]
[[inputs.zfs]]
poolMetrics = true

View File

@@ -0,0 +1,24 @@
#!/bin/bash
# Watchdog: restart the WireGuard tunnel when the peer endpoint IP no longer
# matches DNS, or when the ping target is unreachable.
# Check status of interface
# {{ wg_interface }}: name of the interface to check
# {{ dns_hostname }}: the name of the peer whose IP should be checked
# First IPv4 address found among the interface's endpoints.
cip=$(wg show {{ wg_interface }} endpoints | grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}")
echo "Wireguard peer IP from Interface: $cip"
pingip=$(ping -c 1 {{ ping_ip }} &> /dev/null && echo success || echo fail) #change ip to target server
digIP=$(dig +short {{ dns_hostname }}) #the peer address must be set
# NOTE(review): if the hostname resolves to multiple A records, dig returns
# several lines and this comparison always fails, forcing a restart -- verify
# the record is single-valued.
echo "$digIP"
if [ "$digIP" != "$cip" ]
then
echo "IPs doesn't match, restarting wireguard"
wg-quick down {{ homelab_homedir }}/{{ wg_interface }}.conf
wg-quick up {{ homelab_homedir }}/{{ wg_interface }}.conf
elif [ "$pingip" != "success" ]
then
echo "Ping failed, restarting wireguard..."
wg-quick down {{ homelab_homedir }}/{{ wg_interface }}.conf
wg-quick up {{ homelab_homedir }}/{{ wg_interface }}.conf
else
echo "OK"
#nothing else todo
fi

View File

@@ -0,0 +1,11 @@
# WireGuard client configuration (wg-quick format).
# NOTE(review): the PrivateKey below is committed in plaintext -- it should be
# moved to an encrypted store (e.g. SOPS) and the key pair rotated.
[Interface]
Address = 10.10.0.2/32
ListenPort = 51820
PrivateKey = 8Gw/9MJpo8AwSmEY8W/zgPu6z0Lvn7E2LvRRDpkMhFo=
DNS = 192.168.8.1, {{ secret_domain }}
[Peer]
PublicKey = K7kgSuPwH2NA7FeLHwvGMX02kvhD8DxHgL/wflsgx34=
# Route all traffic through the tunnel.
AllowedIPs = 0.0.0.0/0
Endpoint = services.{{ secret_domain }}:51820
PersistentKeepalive = 25

View File

@@ -0,0 +1 @@
# Role default; presumably flips to true when the target iocage jail is
# absent -- TODO(review): confirm against the tasks that consume it.
jail_missing: false

View File

@@ -0,0 +1,42 @@
# Host-specific variables for the workstation role.
# XDG user data locations for fonts and icon themes.
fonts_dir: ~/.local/share/fonts
icons_dir: ~/.local/share/icons
newaita_iconset_url: "https://github.com/cbrnix/Newaita/archive/1.09.20a.tar.gz"
# NAS endpoint and local layout: shares are mounted under mnt_dir and then
# symlinked into nas_dir (or directly into the user's home for media).
nas_hostname: truenas.{{ secret_domain }}
mnt_dir: /mnt
nas_dir: ~/NAS
# Each entry: src = NFS export, path = local mountpoint, link = symlink target.
nfs_shares:
  - {
      src: "{{ nas_hostname }}:/mnt/storage/downloads",
      path: "{{ mnt_dir }}/downloads",
      link: "{{ nas_dir }}/downloads",
    }
  - {
      src: "{{ nas_hostname }}:/mnt/storage/shared-documents",
      path: "{{ mnt_dir }}/shared-documents",
      link: "{{ nas_dir }}/shared-documents",
    }
  - {
      src: "{{ nas_hostname }}:/mnt/storage/home/claude",
      path: "{{ mnt_dir }}/home-claude",
      link: "{{ nas_dir }}/home-claude",
    }
  - {
      src: "{{ nas_hostname }}:/mnt/storage/home/helene",
      path: "{{ mnt_dir }}/home-helene",
      link: "{{ nas_dir }}/home-helene",
    }
  - {
      src: "{{ nas_hostname }}:/mnt/storage/photo",
      path: "{{ mnt_dir }}/photo",
      link: "{{ nas_dir }}/photo",
    }
  - {
      src: "{{ nas_hostname }}:/mnt/storage/music",
      path: "{{ mnt_dir }}/music",
      link: "/home/claude/Music",
    }
  - {
      src: "{{ nas_hostname }}:/mnt/storage/video",
      path: "{{ mnt_dir }}/video",
      link: "/home/claude/Videos",
    }

View File

@@ -0,0 +1,11 @@
#!/bin/bash
# Disk one (4TB): mirror the NFS-mounted datasets onto the local backup drive.
dest_root=/run/media/claude/local-backups
mkdir -p "${dest_root}"/{backups,documents,downloads,photo,piracy,jails}
# Mirror each src:dest pair; --delete keeps the destination an exact copy.
for pair in \
    /mnt/backups:backups \
    /mnt/documents:documents \
    /mnt/downloads:downloads \
    /mnt/photo:photo \
    /mnt/piracy:piracy \
    /mnt/iocage/jails:jails; do
  src=${pair%%:*}
  dst=${pair##*:}
  sudo rsync -avhP "${src}/" "${dest_root}/${dst}/" --delete
done

View File

@@ -0,0 +1,9 @@
#!/bin/bash
# Disk two (2.5TB): mirror home directories and the music library.
backup_root=/run/media/claude/local-backups
mkdir -p "${backup_root}/music"
mkdir -p "${backup_root}/home"/{claude,helene}
# Mirror each src:dest pair; --delete keeps the destination an exact copy.
for pair in \
    /mnt/home-claude:home/claude \
    /mnt/home-helene:home/helene \
    /mnt/music:music; do
  from=${pair%%:*}
  to=${pair##*:}
  sudo rsync -avhP "${from}/" "${backup_root}/${to}/" --delete
done

View File

@@ -0,0 +1,3 @@
#!/bin/bash
# Upgrade every outdated user-installed pip package to its latest version.
# NOTE: modern pip rejects `list --outdated --format=freeze`
# ("List format 'freeze' can not be used with the --outdated option"),
# so parse the default columnar output instead (package name is column 1,
# the first two lines are the header and separator).
# xargs -r: do nothing when every package is already up to date.
pip3 list --outdated --user | awk 'NR>2 {print $1}' | xargs -r -n1 pip3 install -U --user

View File

@@ -0,0 +1,53 @@
# Configuration for the `throttled` (lenovo_fix) daemon -- presumably read via
# Python configparser; TODO(review): confirm target daemon version.
[GENERAL]
# Enable or disable the script execution
Enabled: True
# SYSFS path for checking if the system is running on AC power
Sysfs_Power_Path: /sys/class/power_supply/AC*/online
## Settings to apply while connected to Battery power
[BATTERY]
# Update the registers every this many seconds
Update_Rate_s: 30
# Max package power for time window #1
PL1_Tdp_W: 29
# Time window #1 duration
PL1_Duration_s: 28
# Max package power for time window #2
PL2_Tdp_W: 44
# Time window #2 duration
# (key case differs from PL1_Duration_s -- harmless to configparser, which
# treats keys case-insensitively)
PL2_Duration_S: 0.002
# Max allowed temperature before throttling
Trip_Temp_C: 85
# Set cTDP to normal=0, down=1 or up=2 (EXPERIMENTAL)
cTDP: 0
## Settings to apply while connected to AC power
[AC]
# Update the registers every this many seconds
Update_Rate_s: 5
# Max package power for time window #1
PL1_Tdp_W: 44
# Time window #1 duration
PL1_Duration_s: 28
# Max package power for time window #2
PL2_Tdp_W: 44
# Time window #2 duration
PL2_Duration_S: 0.002
# Max allowed temperature before throttling
Trip_Temp_C: 95
# Set HWP energy performance hints to 'performance' on high load (EXPERIMENTAL)
HWP_Mode: False
# Set cTDP to normal=0, down=1 or up=2 (EXPERIMENTAL)
cTDP: 0
# Negative offsets undervolt the listed voltage planes. These values are
# hardware-specific -- do not copy to a different machine without testing.
[UNDERVOLT]
# CPU core voltage offset (mV)
CORE: -105
# Integrated GPU voltage offset (mV)
GPU: -85
# CPU cache voltage offset (mV)
CACHE: -105
# System Agent voltage offset (mV)
UNCORE: -85
# Analog I/O voltage offset (mV)
ANALOGIO: 0

View File

@@ -0,0 +1,7 @@
# Yum/DNF repository definition for the community VSCodium RPM builds.
[gitlab.com_paulcarroty_vscodium_repo]
name=gitlab.com_paulcarroty_vscodium_repo
baseurl=https://paulcarroty.gitlab.io/vscodium-deb-rpm-repo/rpms/
enabled=1
# Verify both package signatures and repo metadata against the key below.
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://gitlab.com/paulcarroty/vscodium-deb-rpm-repo/-/raw/master/pub.gpg

View File

@@ -0,0 +1,2 @@
#https://www.2daygeek.com/remove-delete-old-unused-kernels-centos-fedora-rhel/
# dnf.conf: keep at most 3 versions of install-only packages (kernels);
# older ones are removed automatically on upgrade.
installonly_limit=3

View File

@@ -0,0 +1,16 @@
---
# Render the per-host chezmoi configuration into the user's XDG config dir.
- name: configuration | include vars
  ansible.builtin.include_vars:
    file: vars/{{ ansible_facts['nodename'] }}.yml
- name: configuration | create chezmoi directory
  ansible.builtin.file:
    state: directory
    path: ~/.config/chezmoi
    mode: 0700
- name: configuration | templating chezmoi.toml
  ansible.builtin.template:
    src: chezmoi.toml.j2
    dest: ~/.config/chezmoi/chezmoi.toml
    mode: 0600

View File

@@ -0,0 +1,18 @@
---
- name: gnome | create directories
ansible.builtin.file:
state: directory
path: "{{ item }}"
mode: 0700
loop:
- "{{ fonts_dir }}"
- "{{ icons_dir }}"
- name: gnome | download nerd fonts
ansible.builtin.get_url:
url: "{{ item }}"
dest: "{{ fonts_dir }}"
mode: 0700
loop:
- https://github.com/ryanoasis/nerd-fonts/raw/master/patched-fonts/FiraCode/Retina/complete/Fira%20Code%20Retina%20Nerd%20Font%20Complete.ttf
- https://github.com/ryanoasis/nerd-fonts/raw/master/patched-fonts/FiraCode/Retina/complete/Fira%20Code%20Retina%20Nerd%20Font%20Complete%20Mono.ttf

View File

@@ -0,0 +1,13 @@
---
- name: gpg | create directory
ansible.builtin.file:
state: directory
path: ~/.gnupg
mode: 0700
# https://github.com/drduh/YubiKey-Guide#using-keys
- name: gpg | get gpg configuration
ansible.builtin.get_url:
url: https://raw.githubusercontent.com/drduh/config/master/gpg.conf
dest: ~/.gnupg/gpg.conf
mode: 0600

View File

@@ -0,0 +1,56 @@
---
# Role entry point: every include is tagged so subsets can be run with
# --tags; host-specific task files are gated on the node name.
- ansible.builtin.include_tasks: system.yml
  tags:
    - system
- ansible.builtin.include_tasks: repositories.yml
  tags:
    - packages
- ansible.builtin.include_tasks: packages-prerequisites.yml
  tags:
    - packages
- ansible.builtin.include_tasks: packages-common.yml
  tags:
    - packages
- ansible.builtin.include_tasks: packages-claude-fixe-fedora.yml
  tags:
    - packages
  when: ansible_facts['nodename'] == "claude-fixe-fedora"
- ansible.builtin.include_tasks: packages-claude-thinkpad-fedora.yml
  tags:
    - packages
  when: ansible_facts['nodename'] == "claude-thinkpad-fedora"
- ansible.builtin.include_tasks: packages-post.yml
  tags:
    - packages
- ansible.builtin.include_tasks: chezmoi.yml
  tags:
    - chezmoi
- ansible.builtin.include_tasks: gpg.yml
  tags:
    - gpg
- ansible.builtin.include_tasks: shell.yml
  tags:
    - shell
- ansible.builtin.include_tasks: gnome.yml
  tags:
    - gnome
# NFS mounts only on the desktop; WireGuard only on the laptop.
- ansible.builtin.include_tasks: nfs.yml
  tags:
    - nfs
  when: ansible_facts['nodename'] == "claude-fixe-fedora"
- ansible.builtin.include_tasks: wireguard.yml
  tags:
    - wireguard
  when: ansible_facts['nodename'] == "claude-thinkpad-fedora"

View File

@@ -0,0 +1,65 @@
---
# Mount NAS NFS shares under {{ mnt_dir }} and expose them via symlinks.
# (Removed an exact duplicate of the "stat music folder" task that
# re-registered `music` after it had already been consumed.)
- name: nfs | create root directory
  ansible.builtin.file:
    state: directory
    path: "{{ mnt_dir }}"
    mode: 0777
  become: true
- name: nfs | create directories
  ansible.builtin.file:
    state: directory
    path: "{{ item.path }}"
    mode: 0775
  loop: "{{ nfs_shares }}"
  become: true
# NOTE(review): state=present only records the entry in fstab without
# mounting it now -- use state=mounted if an immediate mount is intended.
- name: nfs | mount shares
  ansible.builtin.mount:
    state: present
    path: "{{ item.path }}"
    src: "{{ item.src }}"
    fstype: nfs4
    opts: _netdev
  with_items: "{{ nfs_shares }}"
  become: true
- name: nfs | create links dir
  ansible.builtin.file:
    state: directory
    path: "{{ nas_dir }}"
    mode: 0700
# Replace plain ~/Music and ~/Videos directories so they can become symlinks
# to the NFS mounts (a real directory would block the link task below).
- name: nfs | stat music folder
  ansible.builtin.stat:
    path: ~/Music
  register: music
- name: nfs | remove music folder
  ansible.builtin.file:
    path: ~/Music
    state: absent
  when: music.stat.isdir is defined and music.stat.isdir
- name: nfs | stat videos folder
  ansible.builtin.stat:
    path: ~/Videos
  register: videos
- name: nfs | remove videos folder
  ansible.builtin.file:
    path: ~/Videos
    state: absent
  when: videos.stat.isdir is defined and videos.stat.isdir
- name: nfs | create links
  ansible.builtin.file:
    state: link
    src: "{{ item.path }}"
    dest: "{{ item.link }}"
  with_items: "{{ nfs_shares }}"

View File

@@ -0,0 +1,19 @@
---
# Desktop-only packages: NVIDIA driver, video tooling (dnf) and media
# utilities (Homebrew on Linux).
- name: packages-claude-fixe-fedora | dnf
  ansible.builtin.dnf:
    name:
      - akmod-nvidia
      - libva-utils
      - libva-vdpau-driver
      - handbrake
      - vdpauinfo
      - mkvtoolnix-gui
  become: true
- name: packages-claude-fixe-fedora | brew
  community.general.homebrew:
    name:
      - jpeg-archive
      - parallel
    path: /home/{{ lookup('env', 'USER') }}/.linuxbrew/bin
    state: present

View File

@@ -0,0 +1,14 @@
---
# Laptop-only packages: power management (tlp) and WireGuard tooling.
- name: packages-claude-thinkpad-fedora | dnf
  ansible.builtin.dnf:
    name:
      - tlp
      - wireguard-tools
  become: true
# NOTE(review): installs TLPUI system-wide from the unpinned git master
# branch -- consider pinning a tag for reproducibility.
- name: packages-claude-thinkpad-fedora | tlp-ui
  ansible.builtin.pip:
    name:
      - git+https://github.com/d4nj1/TLPUI.git
    state: present
  become: true

View File

@@ -0,0 +1,158 @@
---
# Common desktop packages from Fedora, RPM Fusion and the COPR/third-party
# repos configured in repositories.yml.
- name: packages-common | dnf
  ansible.builtin.dnf:
    name:
      - codium
      - mpv
      - resilio-sync
      - gnome-tweak-tool
      - la-capitaine-cursor-theme
      - git
      - fish
      - alacritty
      - redhat-rpm-config
      - python3-devel
      - python3-virtualenv
      - ffmpeg-libs
      - nano
      - nfs-utils
      - libgtop2-devel
      - fuse-exfat
      - exfat-utils
      - openssl
      - openssl-devel
      - libacl-devel
      - libicu-devel
      - gcc-c++
      - picard
      - pinta
      - calibre
      - mediawriter
      - hugo
      - stress
      - vlc
      - p7zip
      - p7zip-plugins
      - lsd
      - bat
      - fzf
      - fd-find
      - remmina
      - yp-tools
      - ffmpeg
      - deadbeef
      - nmap
      - jq
      - gnupg
      - steam
      - npm
      - ShellCheck
      - gnome-extensions-app
      - neovim
      - brave-browser
      - starship
      - tmux
      - cawbird
      - age
      - vorta
      - mediainfo
      - discord
      - joplin
      - librewolf
      - go-task
      - kopia
      - kopia-ui
    state: present
    update_cache: true
  become: true
# Python CLI tools installed per-user (--user), not system-wide.
- name: packages-common | python
  ansible.builtin.pip:
    name:
      - ansible-lint
      - borgbackup
      - yt-dlp
      - s-tui
      - pylint
      - pre-commit
      - comictagger
      - virtualfish
    state: present
    extra_args: --user
# Flatpak apps installed from flathub refs.
- name: packages-common | flatpak
  community.general.flatpak:
    name: "{{ item }}"
    state: present
  loop:
    - https://dl.flathub.org/repo/appstream/com.bitwarden.desktop.flatpakref
  become: true
# Kubernetes/ops CLIs via Homebrew on Linux (installed under ~/.linuxbrew,
# see packages-prerequisites.yml).
- name: packages-common | brew
  community.general.homebrew:
    name:
      - minio/stable/mc
      - kubectl
      - helm
      - kustomize
      - fluxcd/tap/flux
      - weaveworks/tap/gitops
      - sops
      - gh
      - derailed/popeye/popeye
      - chezmoi
    path: /home/{{ lookup('env', 'USER') }}/.linuxbrew/bin
    state: present
    update_homebrew: true
# Install OpenLens as an AppImage under ~/Apps with a stable symlink and a
# desktop entry.
- name: packages-common | AppImage | Directories
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
  loop:
    - "/home/{{ lookup('env', 'USER') }}/Apps"
- name: packages-common | AppImage | OpenLens | Check directory
  ansible.builtin.stat:
    path: /home/{{ lookup('env', 'USER') }}/Apps/OpenLens
  register: openlens
# NOTE(review): the `when: not openlens.stat.exists` guard at the bottom is
# commented out, so this block runs on every play even though the stat above
# is registered for it -- confirm whether that is intentional.
- block:
    - name: packages-common | AppImage | OpenLens | Create directory
      ansible.builtin.file:
        path: /home/{{ lookup('env', 'USER') }}/Apps/OpenLens
        state: directory
    - name: packages-common | AppImage | OpenLens | Get latest version
      ansible.builtin.shell:
        cmd: VERSION=$(curl -sX GET "https://api.github.com/repos/MuhammedKalkan/OpenLens/releases/latest" | jq --raw-output '.tag_name'); printf "%s" "${VERSION#*v}"
      register: openlens_version
      changed_when: false
    - name: packages-common | AppImage | OpenLens | Download Binary
      ansible.builtin.get_url:
        url: https://github.com/MuhammedKalkan/OpenLens/releases/download/v{{ openlens_version.stdout }}/OpenLens-{{ openlens_version.stdout }}.AppImage
        dest: /home/{{ lookup('env', 'USER') }}/Apps/OpenLens/OpenLens-{{ openlens_version.stdout }}.AppImage
        mode: 0755
    - name: packages-common | AppImage | OpenLens | Symlink
      ansible.builtin.file:
        src: /home/{{ lookup('env', 'USER') }}/Apps/OpenLens/OpenLens-{{ openlens_version.stdout }}.AppImage
        dest: /home/{{ lookup('env', 'USER') }}/Apps/OpenLens/OpenLens.AppImage
        state: link
        mode: 0755
    - name: packages-common | AppImage | OpenLens | Gnome Desktop
      ansible.builtin.template:
        src: application.desktop
        dest: /home/{{ lookup('env', 'USER') }}/.local/share/applications/{{ item.name }}
        mode: 0644
      loop:
        - {
            name: "OpenLens",
            comment: "The Kubernetes IDE",
            path: "/home/{{ lookup('env', 'USER') }}/Apps/OpenLens",
            command: "OpenLens.AppImage",
            categories: "Programming;",
          }
  #when: not openlens.stat.exists

View File

@@ -0,0 +1,14 @@
---
# Patch the packaged resilio-sync user unit to hook the user's default
# target, then start/enable it per-user.
# NOTE(review): editing /usr/lib/systemd/... will be reverted on package
# upgrade -- a systemd drop-in override would survive updates.
- name: packages-post | modify resilio-sync service file
  ansible.builtin.replace:
    path: /usr/lib/systemd/user/resilio-sync.service
    regexp: "multi-user"
    replace: "default"
  become: true
- name: packages-post | activate resilio-sync service
  ansible.builtin.systemd:
    name: resilio-sync
    scope: user
    state: started
    enabled: true

View File

@@ -0,0 +1,18 @@
---
# Bootstrap a user-local Homebrew (Linuxbrew) checkout under ~/.linuxbrew and
# expose the brew binary on a bin directory.
- name: packages-prerequisites | clone homebrew GitHub repo
  ansible.builtin.git:
    repo: "https://github.com/Homebrew/brew"
    dest: "/home/{{ lookup('env', 'USER') }}/.linuxbrew/Homebrew"
    version: "master"
- name: packages-prerequisites | create bin directory for homebrew
  ansible.builtin.file:
    path: "/home/{{ lookup('env', 'USER') }}/.linuxbrew/bin"
    state: directory
    # quoted so YAML does not interpret the octal literal (ansible-lint risky-octal)
    mode: "0775"
- name: packages-prerequisites | create a symbolic link for brew
  ansible.builtin.file:
    src: "/home/{{ lookup('env', 'USER') }}/.linuxbrew/Homebrew/bin/brew"
    dest: "/home/{{ lookup('env', 'USER') }}/.linuxbrew/bin/brew"
    state: link

View File

@@ -0,0 +1,114 @@
---
# Configure the third-party package repositories (RPM Fusion, COPR, VSCodium,
# Resilio Sync, Brave, LibreWolf, Kopia) used by the package install tasks.
- name: repositories | enable the RPM Fusion repository
  ansible.builtin.dnf:
    name: "{{ item }}"
    state: present
    # the release RPMs are installed before their GPG key is known
    disable_gpg_check: true
  loop:
    - https://mirrors.rpmfusion.org/free/fedora/rpmfusion-free-release-{{ ansible_distribution_major_version }}.noarch.rpm
    - https://mirrors.rpmfusion.org/nonfree/fedora/rpmfusion-nonfree-release-{{ ansible_distribution_major_version }}.noarch.rpm
  become: true
- name: repositories | enable copr repositories
  ansible.builtin.command:
    cmd: dnf copr enable -y {{ item.repo }}
    # idempotence: skip once the repo file from a previous run exists
    creates: "{{ item.file }}"
  loop:
    - {
        repo: "tomaszgasior/mushrooms",
        file: "/etc/yum.repos.d/_copr:copr.fedorainfracloud.org:tomaszgasior:mushrooms.repo",
      }
    - {
        repo: "taw/joplin",
        file: "/etc/yum.repos.d/_copr:copr.fedorainfracloud.org:taw:joplin.repo",
      }
  become: true
  when: ansible_facts['nodename'] == "claude-fixe-fedora"
- name: repositories | copy yum repo files
  ansible.builtin.copy:
    src: "yum/{{ item }}"
    dest: "/etc/yum.repos.d/{{ item }}"
    # quoted so YAML does not interpret the octal literal (ansible-lint risky-octal)
    mode: "0644"
  loop:
    - vscodium.repo
  become: true
- name: repositories | resilio sync - import repository GPG key
  ansible.builtin.rpm_key:
    state: present
    key: https://linux-packages.resilio.com/resilio-sync/key.asc
  become: true
- name: repositories | resilio sync - add repository
  ansible.builtin.yum_repository:
    name: rslsync
    description: Resilio Sync Repository
    baseurl: https://linux-packages.resilio.com/resilio-sync/rpm/$basearch
    gpgcheck: true
  become: true
- name: repositories | brave - check presence
  ansible.builtin.stat:
    path: /etc/yum.repos.d/brave-browser-rpm-release.s3.brave.com_x86_64_.repo
  register: brave
# NOTE: the "warn" parameter was removed from the command module in
# ansible-core 2.14 and would make these tasks fail, so it is dropped below.
- name: repositories | brave - add repository
  ansible.builtin.command:
    cmd: dnf config-manager --add-repo https://brave-browser-rpm-release.s3.brave.com/x86_64/
    creates: /etc/yum.repos.d/brave-browser-rpm-release.s3.brave.com_x86_64_.repo
  become: true
  when: not brave.stat.exists
- name: repositories | brave - import asc
  ansible.builtin.command:
    cmd: rpm --import https://brave-browser-rpm-release.s3.brave.com/brave-core.asc
  become: true
  when: not brave.stat.exists
- name: repositories | librewolf - check presence
  ansible.builtin.stat:
    path: /etc/yum.repos.d/rpm.librewolf.net.repo
  register: librewolf
- name: repositories | librewolf - add repository
  ansible.builtin.command:
    cmd: dnf config-manager --add-repo https://rpm.librewolf.net
    creates: /etc/yum.repos.d/rpm.librewolf.net.repo
  become: true
  when: not librewolf.stat.exists
- name: repositories | librewolf - import asc
  ansible.builtin.command:
    cmd: rpm --import https://keys.openpgp.org/vks/v1/by-fingerprint/034F7776EF5E0C613D2F7934D29FBD5F93C0CFC3
  become: true
  when: not librewolf.stat.exists
- name: repositories | kopia - check presence
  ansible.builtin.stat:
    path: /etc/yum.repos.d/kopia.repo
  register: kopia
- name: repositories | kopia - import asc
  ansible.builtin.command:
    cmd: rpm --import https://kopia.io/signing-key
  become: true
  when: not kopia.stat.exists
- name: repositories | kopia - add repository
  ansible.builtin.blockinfile:
    path: /etc/yum.repos.d/kopia.repo
    block: |
      [Kopia]
      name=Kopia
      baseurl=http://packages.kopia.io/rpm/stable/$basearch/
      gpgcheck=1
      enabled=1
      gpgkey=https://kopia.io/signing-key
    create: true
    # explicit world-readable mode, consistent with the other repo files
    mode: "0644"
  become: true

View File

@@ -0,0 +1,15 @@
---
# Install personal helper scripts into ~/.local/scripts.
- name: scripts | create directory
  ansible.builtin.file:
    state: directory
    path: "~/.local/scripts"
    # private to the owning user; quoted so YAML does not interpret the octal
    # literal (ansible-lint risky-octal)
    mode: "0700"
- name: scripts | copy scripts
  ansible.builtin.copy:
    src: "scripts/{{ item }}"
    dest: "~/.local/scripts"
    mode: "0755"
  # "loop" is the modern replacement for with_items and matches the other
  # task files in this role
  loop:
    - backup-local-usb-disk-one.bash
    - backup-local-usb-disk-two.bash

View File

@@ -0,0 +1,6 @@
---
# Set Fish as the login shell for the current user.
# Assumes the fish package is already installed at /usr/bin/fish — TODO confirm
# it is provided by an earlier packages task.
- name: shell | make Fish default shell
  ansible.builtin.user:
    name: "{{ lookup('env', 'USER') }}"
    shell: /usr/bin/fish
  become: true

View File

@@ -0,0 +1,28 @@
---
# System-level tweaks: passwordless sudo for wheel, kernel retention limit,
# and DNF download tuning.
- name: system | disable password sudo
  # NOTE: passwordless sudo for the whole wheel group is a deliberate
  # convenience/security trade-off on these personal machines.
  ansible.builtin.lineinfile:
    dest: /etc/sudoers
    state: present
    regexp: "^%wheel"
    line: "%wheel ALL=(ALL) NOPASSWD: ALL"
    # refuse the edit if the resulting sudoers file fails to parse
    validate: visudo -cf %s
  become: true
- name: system | remove old unused kernels
  # keep at most 3 install-only packages (i.e. kernels);
  # NOTE(review): on Fedora this is usually configured via /etc/dnf/dnf.conf —
  # confirm /etc/yum.conf is the intended path on these hosts
  ansible.builtin.lineinfile:
    dest: /etc/yum.conf
    state: present
    line: "installonly_limit=3"
    create: true
    # quoted so YAML does not interpret the octal literal (ansible-lint risky-octal)
    mode: "0644"
  become: true
- name: system | get better download speed with DNF
  ansible.builtin.blockinfile:
    path: /etc/dnf/dnf.conf
    block: |
      defaultyes=True
      deltarpm=True
      install_weak_deps=False
      max_parallel_downloads={{ ansible_processor_vcpus | default('8') }}
  become: true

View File

@@ -0,0 +1,6 @@
---
# Deploy the per-host WireGuard client configuration into the user's home.
- name: wireguard | copy wireguard configuration
  ansible.builtin.template:
    src: wireguard/{{ ansible_facts['nodename'] }}.conf
    dest: ~/wireguard.conf
    # the rendered file contains the private key: owner-read/write only;
    # quoted so YAML does not interpret the octal literal (ansible-lint risky-octal)
    mode: "0600"

View File

@@ -0,0 +1,9 @@
{#- Freedesktop launcher template, rendered per application by the
    packages-common AppImage tasks; `item` supplies name/comment/path/
    command/categories. Jinja comment is stripped at render time. -#}
[Desktop Entry]
Name={{ item.name }}
StartupWMClass={{ item.name }}
Comment={{ item.comment }}
Exec={{ item.path }}/{{ item.command }}
Type=Application
Categories={{ item.categories }}
Path={{ item.path }}
X-Desktop-File-Install-Version=0.26

View File

@@ -0,0 +1,11 @@
# Templated TOML config: secrets are encrypted with age, and [data] injects
# per-host UI values from Ansible host_vars (alacritty/remmina).
encryption = "age"
[age]
# NOTE(review): identity path is hard-coded to /home/claude — assumes this
# template is only rendered for that user; confirm before reuse.
identity = "/home/claude/.config/sops/age/keys.txt"
recipient = "age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg"
[data]
alacritty_font_size = {{ alacritty.font_size }}
alacritty_window_columns = {{ alacritty.window_columns }}
alacritty_window_lines = {{ alacritty.window_lines }}
remmina_font_size = {{ remmina.font_size }}

View File

@@ -0,0 +1,10 @@
# Per-host WireGuard client config template; the rendered file holds the
# private key and is deployed with mode 0600 by the wireguard task.
[Interface]
Address = 10.10.0.4/32
ListenPort = 51820
PrivateKey = {{ wireguard_private_key }}
# NOTE(review): the non-IP second DNS entry is presumably meant as the search
# domain (supported by wg-quick) — confirm the consumer is wg-quick
DNS = 192.168.8.1,{{ secret_domain }}
[Peer]
PublicKey = K7kgSuPwH2NA7FeLHwvGMX02kvhD8DxHgL/wflsgx34=
# full tunnel: route all traffic through the peer
AllowedIPs = 0.0.0.0/0
Endpoint = services.{{ secret_domain }}:51820

View File

@@ -0,0 +1,7 @@
---
# Per-host UI values consumed by templates (e.g. the [data] section of the
# age config template): larger font and window for this host's display.
alacritty:
  font_size: 11.0
  window_columns: 150
  window_lines: 40
remmina:
  font_size: 11

View File

@@ -0,0 +1,7 @@
---
# Per-host UI values consumed by templates (e.g. the [data] section of the
# age config template): smaller font and window for this host's display.
alacritty:
  font_size: 9.0
  window_columns: 100
  window_lines: 28
remmina:
  font_size: 9