mirror of https://github.com/auricom/home-cluster.git
chore: remove truenas ansible
@@ -1,7 +0,0 @@
---
- hosts: storage
  become: false
  gather_facts: true
  any_errors_fatal: true
  roles:
    - role: storage
@@ -1,7 +0,0 @@
---
- hosts: truenas-instances
  become: false
  gather_facts: true
  any_errors_fatal: true
  roles:
    - role: truenas
@@ -1,12 +0,0 @@
---
homelab_homedir: "/mnt/{{ pool_name }}/home/homelab"
backups_dir: "/mnt/{{ pool_name }}/backups/"
telegraf_dir: "{{ homelab_homedir }}/telegraf"
scripts_dir: "{{ homelab_homedir }}/scripts"
scrutiny_bin: scrutiny-collector-metrics
scrutiny_dir: "{{ homelab_homedir }}/scrutiny"
certificates_dir: "{{ homelab_homedir }}/letsencrypt/{{ secret_domain }}"

ping_ip: 192.168.8.1
wg_interface: wg0-client
dns_hostname: services.{{ secret_domain }}
@@ -1,107 +0,0 @@
#!/usr/bin/env python3

# clearempty.py - Koen Vermeer <k.vermeer@eyehospital.nl>
# Inspired by rollup.py by Arno Hautala <arno@alum.wpi.edu>
# modifications by Arno Hautala
# This work is licensed under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
# (CC BY-SA-3.0) http://creativecommons.org/licenses/by-sa/3.0/

# This script removes empty snapshots, based on their 'used' property.
# Note that one snapshot's 'used' value may change when another snapshot is
# destroyed. This script iteratively destroys the oldest empty snapshot. It
# does not remove the latest snapshot of each dataset or manual snapshots
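# Example invocation (hypothetical pool/dataset name "tank"; cron-friendly):
#   ./clearempty.py --recursive --prefix auto tank
# would walk tank and its child datasets and repeatedly destroy "auto-*"
# snapshots whose 'used' property is 0, while always keeping the newest
# snapshot of each dataset. Add --test to only print the candidates.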

import subprocess
import argparse
import sys
from collections import defaultdict

parser = argparse.ArgumentParser(description='Removes empty auto snapshots.')
parser.add_argument('datasets', nargs='+', help='the root dataset(s) from which to remove snapshots')
parser.add_argument('--test', '-t', action="store_true", default=False, help='only display the snapshots that would be deleted, without actually deleting them. Note that due to dependencies between snapshots, this may not match what would really happen.')
parser.add_argument('--recursive', '-r', action="store_true", default=False, help='recursively removes snapshots from nested datasets')
parser.add_argument('--prefix', '-p', action='append', help='list of snapshot name prefixes that will be considered')

args = parser.parse_args()

if not args.prefix:
    args.prefix = ['auto']

args.prefix = [prefix+"-" for prefix in set(args.prefix)]

deleted = defaultdict(lambda : defaultdict(lambda : defaultdict(int)))

snapshot_was_deleted = True

while snapshot_was_deleted:
    snapshot_was_deleted = False
    snapshots = defaultdict(lambda : defaultdict(lambda : defaultdict(int)))

    # Get properties of all snapshots of the selected datasets
    for dataset in args.datasets:
        subp = subprocess.Popen(["zfs", "get", "-Hrpo", "name,property,value", "type,creation,used,freenas:state", dataset], stdout=subprocess.PIPE)
        zfs_snapshots = subp.communicate()[0]
        if subp.returncode:
            print("zfs get failed with RC=%s" % subp.returncode)
            sys.exit(1)

        for snapshot in zfs_snapshots.splitlines():
            name,property,value = snapshot.decode().split('\t',3)

            # if the rollup isn't recursive, skip any snapshots from child datasets
            if not args.recursive and not name.startswith(dataset+"@"):
                continue

            try:
                dataset,snapshot = name.split('@',2)
            except ValueError:
                continue

            snapshots[dataset][snapshot][property] = value

    # Ignore non-snapshots and not-auto-snapshots
    # Remove already destroyed snapshots
    for dataset in list(snapshots.keys()):
        latest = None
        latestNEW = None
        for snapshot in sorted(snapshots[dataset], key=lambda snapshot: snapshots[dataset][snapshot]['creation'], reverse=True):
            if not any(map(snapshot.startswith, args.prefix)) \
                or snapshots[dataset][snapshot]['type'] != "snapshot":
                del snapshots[dataset][snapshot]
                continue
            if not latest:
                latest = snapshot
                del snapshots[dataset][snapshot]
                continue
            if not latestNEW and snapshots[dataset][snapshot]['freenas:state'] == 'NEW':
                latestNEW = snapshot
                del snapshots[dataset][snapshot]
                continue
            if snapshots[dataset][snapshot]['freenas:state'] == 'LATEST':
                del snapshots[dataset][snapshot]
                continue
            if snapshots[dataset][snapshot]['used'] != '0' \
                or snapshot in list(deleted[dataset].keys()):
                del snapshots[dataset][snapshot]
                continue

        # Stop if no snapshots are in the list
        if not snapshots[dataset]:
            del snapshots[dataset]
            continue

        # destroy the most recent empty snapshot
        snapshot = max(snapshots[dataset], key=lambda snapshot: snapshots[dataset][snapshot]['creation'])
        if not args.test:
            # destroy the snapshot
            subprocess.call(["zfs", "destroy", dataset+"@"+snapshot])

        deleted[dataset][snapshot] = snapshots[dataset][snapshot]
        snapshot_was_deleted = True

for dataset in sorted(deleted.keys()):
    if not deleted[dataset]:
        continue
    print(dataset)
    for snapshot in sorted(deleted[dataset].keys()):
        print("\t", snapshot, deleted[dataset][snapshot]['used'])
@@ -1,262 +0,0 @@
#!/usr/bin/env python3

# rollup.py - Arno Hautala <arno@alum.wpi.edu>
# This work is licensed under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
# (CC BY-SA-3.0) http://creativecommons.org/licenses/by-sa/3.0/

# For the latest version, visit:
# https://github.com/fracai/zfs-rollup
# https://bitbucket.org/fracai/zfs-rollup

# A snapshot pruning script, similar in behavior to Apple's TimeMachine
# Keep hourly snapshots for the last day, daily for the last week, and weekly thereafter.

# TODO:
#   rollup based on local time, not UTC
#     requires pytz, or manually determining and converting time offsets
#   improve documentation

# TEST:

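# Example invocations (hypothetical pool name "tank"):
#   ./rollup.py --recursive tank
#       apply the default hourly/daily/weekly rollup to auto-* snapshots
#   ./rollup.py --test --intervals hourly:24,daily:7,2h:12 tank
#       show what would be pruned when keeping 24 hourlies, 7 dailies and
#       twelve 2-hour slots, without destroying anything
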
import datetime
import calendar
import time
import subprocess
import argparse
import sys
from collections import defaultdict

intervals = {}
intervals['hourly']  = { 'max':24, 'abbreviation':'h', 'reference':'%Y-%m-%d %H' }
intervals['daily']   = { 'max': 7, 'abbreviation':'d', 'reference':'%Y-%m-%d' }
intervals['weekly']  = { 'max': 0, 'abbreviation':'w', 'reference':'%Y-%W' }
intervals['monthly'] = { 'max':12, 'abbreviation':'m', 'reference':'%Y-%m' }
intervals['yearly']  = { 'max':10, 'abbreviation':'y', 'reference':'%Y' }

modifiers = {
    'M' : 1,
    'H' : 60,
    'h' : 60,
    'd' : 60*24,
    'w' : 60*24*7,
    'm' : 60*24*28,
    'y' : 60*24*365,
}

used_intervals = {
    'hourly': intervals['hourly'],
    'daily' : intervals['daily'],
    'weekly': intervals['weekly']
}

parser = argparse.ArgumentParser(description='Prune excess snapshots, keeping hourly for the last day, daily for the last week, and weekly thereafter.')
parser.add_argument('datasets', nargs='+', help='The root dataset(s) from which to prune snapshots')
parser.add_argument('-t', '--test', action="store_true", default=False, help='Only display the snapshots that would be deleted, without actually deleting them')
parser.add_argument('-v', '--verbose', action="store_true", default=False, help='Display verbose information about which snapshots are kept, pruned, and why')
parser.add_argument('-r', '--recursive', action="store_true", default=False, help='Recursively prune snapshots from nested datasets')
parser.add_argument('--prefix', '-p', action='append', help='list of snapshot name prefixes that will be considered')
parser.add_argument('-c', '--clear', action="store_true", default=False, help='remove all snapshots')
parser.add_argument('-i', '--intervals',
    help="Modify and define intervals with which to keep and prune snapshots. Either name existing intervals ("+
        ", ".join(sorted(intervals, key=lambda interval: modifiers[intervals[interval]['abbreviation']]))+"), "+
        "modify the number of those to store (hourly:12), or define new intervals according to interval:count (2h:12). "+
        "Multiple intervals may be specified if comma separated (hourly,daily:30,2h:12). Available modifier abbreviations are: "+
        ", ".join(sorted(modifiers, key=modifiers.get))
)

args = parser.parse_args()

if not args.prefix:
    args.prefix = ['auto']

args.prefix = [prefix+"-" for prefix in set(args.prefix)]

if args.test:
    args.verbose = True

if args.intervals:
    used_intervals = {}

    for interval in args.intervals.split(','):
        if interval.count(':') == 1:
            period,count = interval.split(':')

            try:
                int(count)
            except ValueError:
                print("invalid count: "+count)
                sys.exit(1)

            if period in intervals:
                used_intervals[period] = intervals[period]
                used_intervals[period]['max'] = count

            else:
                try:
                    if period[-1] in modifiers:
                        used_intervals[interval] = { 'max' : count, 'interval' : int(period[:-1]) * modifiers[period[-1]] }
                    else:
                        used_intervals[interval] = { 'max' : count, 'interval' : int(period) }

                except ValueError:
                    print("invalid period: "+period)
                    sys.exit(1)

        elif interval.count(':') == 0 and interval in intervals:
            used_intervals[interval] = intervals[interval]

        else:
            print("invalid interval: "+interval)
            sys.exit(1)

for interval in used_intervals:
    if 'abbreviation' not in used_intervals[interval]:
        used_intervals[interval]['abbreviation'] = interval

snapshots = defaultdict(lambda : defaultdict(lambda : defaultdict(int)))

for dataset in args.datasets:
    subp = subprocess.Popen(["zfs", "get", "-Hrpo", "name,property,value", "creation,type,used,freenas:state", dataset], stdout=subprocess.PIPE)
    zfs_snapshots = subp.communicate()[0]
    if subp.returncode:
        print("zfs get failed with RC=%s" % subp.returncode)
        sys.exit(1)

    for snapshot in zfs_snapshots.splitlines():
        name,property,value = snapshot.decode().split('\t',3)

        # if the rollup isn't recursive, skip any snapshots from child datasets
        if not args.recursive and not name.startswith(dataset+"@"):
            continue

        try:
            dataset,snapshot = name.split('@',2)
        except ValueError:
            continue

        # enforce that this is a snapshot starting with one of the requested prefixes
        if not any(map(snapshot.startswith, args.prefix)):
            if property == 'creation':
                print("will ignore:\t", dataset+"@"+snapshot)

        snapshots[dataset][snapshot][property] = value

for dataset in list(snapshots.keys()):
    latestNEW = None
    latest = None
    for snapshot in sorted(snapshots[dataset], key=lambda snapshot: snapshots[dataset][snapshot]['creation'], reverse=True):
        if not latest:
            latest = snapshot
            snapshots[dataset][snapshot]['keep'] = 'RECENT'
            continue
        if not any(map(snapshot.startswith, args.prefix)) \
            or snapshots[dataset][snapshot]['type'] != "snapshot":
            snapshots[dataset][snapshot]['keep'] = '!PREFIX'
            continue
        if not latestNEW and snapshots[dataset][snapshot]['freenas:state'] == 'NEW':
            latestNEW = snapshot
            snapshots[dataset][snapshot]['keep'] = 'NEW'
            continue
        if snapshots[dataset][snapshot]['freenas:state'] == 'LATEST':
            snapshots[dataset][snapshot]['keep'] = 'LATEST'
            continue

    if not len(list(snapshots[dataset].keys())):
        del snapshots[dataset]

for dataset in sorted(snapshots.keys()):
    print(dataset)

    sorted_snapshots = sorted(snapshots[dataset], key=lambda snapshot: snapshots[dataset][snapshot]['creation'])
    most_recent = sorted_snapshots[-1]

    rollup_intervals = defaultdict(lambda : defaultdict(int))

    for snapshot in sorted_snapshots:
        prune = True

        if args.clear:
            continue

        epoch = snapshots[dataset][snapshot]['creation']

        for interval in list(used_intervals.keys()):
            if 'reference' in used_intervals[interval]:
                reference = time.strftime(used_intervals[interval]['reference'], time.gmtime(float(epoch)))

                if reference not in rollup_intervals[interval]:
                    if int(used_intervals[interval]['max']) != 0 and len(rollup_intervals[interval]) >= int(used_intervals[interval]['max']):
                        rollup_intervals[interval].pop(sorted(rollup_intervals[interval].keys())[0])
                    rollup_intervals[interval][reference] = epoch

            elif 'interval' in used_intervals[interval]:
                if int(used_intervals[interval]['max']) != 0 and len(rollup_intervals[interval]) >= int(used_intervals[interval]['max']):
                    rollup_intervals[interval].pop(sorted(rollup_intervals[interval].keys())[0])

                if (not rollup_intervals[interval]) or int(sorted(rollup_intervals[interval].keys())[-1]) + (used_intervals[interval]['interval']*60*.9) < int(epoch):
                    rollup_intervals[interval][epoch] = epoch

    ranges = list()
    ranges.append(list())
    for snapshot in sorted_snapshots:
        prune = True

        epoch = snapshots[dataset][snapshot]['creation']

        if 'keep' in snapshots[dataset][snapshot]:
            prune = False
            ranges.append(list())

        for interval in list(used_intervals.keys()):
            if 'reference' in used_intervals[interval]:
                reference = time.strftime(used_intervals[interval]['reference'], time.gmtime(float(epoch)))
                if reference in rollup_intervals[interval] and rollup_intervals[interval][reference] == epoch:
                    prune = False
                    ranges.append(list())

            elif 'interval' in used_intervals[interval]:
                if epoch in rollup_intervals[interval]:
                    prune = False
                    ranges.append(list())

        if prune or args.verbose:
            print("\t","pruning\t" if prune else " \t", "@"+snapshot, end=' ')
            if args.verbose:
                for interval in list(used_intervals.keys()):
                    if 'reference' in used_intervals[interval]:
                        reference = time.strftime(used_intervals[interval]['reference'], time.gmtime(float(epoch)))
                        if reference in rollup_intervals[interval] and rollup_intervals[interval][reference] == epoch:
                            print(used_intervals[interval]['abbreviation'], end=' ')
                        else:
                            print('-', end=' ')
                    if 'interval' in used_intervals[interval]:
                        if epoch in rollup_intervals[interval]:
                            print(used_intervals[interval]['abbreviation'], end=' ')
                        else:
                            print('-', end=' ')
                if 'keep' in snapshots[dataset][snapshot]:
                    print(snapshots[dataset][snapshot]['keep'][0], end=' ')
                else:
                    print('-', end=' ')
                print(snapshots[dataset][snapshot]['used'])
            else:
                print()

        if prune:
            ranges[-1].append(snapshot)

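    # Consecutive prunable snapshots are batched into a single
    # 'zfs destroy dataset@first%last' call below; ZFS's '%' range syntax
    # destroys every snapshot between the two names in one operation.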
    for range in ranges:
        if not range:
            continue
        to_delete = dataset+'@'+range[0]
        if len(range) > 1:
            to_delete += '%' + range[-1]
        to_delete = to_delete.replace(' ', '')
        if not to_delete:
            continue
        if args.verbose:
            print('zfs destroy ' + to_delete)
        if not args.test:
            # destroy the snapshot
            subprocess.call(['zfs', 'destroy', to_delete])
@@ -1,28 +0,0 @@
#!/usr/bin/env bash

# Runs smartctl to report current temperature of all disks.
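# Example output (illustrative disk names and values):
#   [{"temperature":34,"disk":"ada0"},{"temperature":45,"disk":"nvd0"}]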

JSON="["

DISKS=$(/sbin/sysctl -n kern.disks | cut -d= -f2)

for i in ${DISKS}
do
  # Get temperature from smartctl (requires root).
  [[ "${i}" = *"ada"* ]] && TEMP=$(/usr/local/sbin/smartctl -l scttemp /dev/$i | grep '^Current Temperature:' | awk '{print $3}')
  [[ "${i}" = *"nvd"* ]] && DEVICE_NUMBER=$(echo ${i} | cut -c 4) && TEMP=$(smartctl -a /dev/nvme${DEVICE_NUMBER} | grep Temperature: | head -1 | awk '{print $2}')

  if [ ${TEMP:-0} -gt 0 ]
  then
    JSON=$(echo "${JSON}{")
    JSON=$(echo "${JSON}\"temperature\":${TEMP},")
    JSON=$(echo "${JSON}\"disk\":\"${i}\"")
    JSON=$(echo "${JSON}},")
  fi

done

# Remove trailing "," on last field.
JSON=$(echo ${JSON} | sed 's/,$//')

echo -e "${JSON}]"
@@ -1,22 +0,0 @@
---
- name: directories | create
  ansible.builtin.file:
    state: directory
    path: "{{ item }}"
    mode: 0775
  loop:
    - "{{ homelab_homedir }}/letsencrypt"
    - "{{ telegraf_dir }}"
    - "{{ backups_dir }}servers/{{ ansible_facts['nodename'] }}"
    - "{{ scripts_dir }}"
    - "{{ scrutiny_dir }}"

- name: directories | truenas
  ansible.builtin.file:
    state: directory
    path: "{{ item }}"
    mode: 0775
  loop:
    - "{{ backups_dir }}servers/coreelec.{{ secret_domain }}"
    - "{{ backups_dir }}servers/opnsense.{{ secret_domain }}"
  when: "main_nas"
@@ -1,32 +0,0 @@
---
- block:
    - name: jail-init | {{ outside_item.item }} | start jail
      ansible.builtin.shell:
        cmd: iocage list | grep -q '^.*\s{{ outside_item.item }}\s.*\sdown\s.*$' && iocage start {{ outside_item.item }}
      failed_when: false

    - name: jail-init | {{ outside_item.item }} | create .ssh directory
      ansible.builtin.shell:
        cmd: iocage exec {{ outside_item.item }} 'mkdir -p /root/.ssh; echo "" > /root/.ssh/authorized_keys; chmod 700 /root/.ssh; chmod 600 /root/.ssh/authorized_keys'

    - name: jail-init | {{ outside_item.item }} | deploy ssh keys
      ansible.builtin.shell:
        cmd: iocage exec {{ outside_item.item }} 'echo "{{ item }}" >> /root/.ssh/authorized_keys'
      loop: "{{ public_ssh_keys }}"

    - name: jail-init | {{ outside_item.item }} | activate sshd
      ansible.builtin.shell:
        cmd: iocage exec {{ outside_item.item }} 'sysrc sshd_enable="YES"'

    - name: jail-init | {{ outside_item.item }} | sshd permit root login
      ansible.builtin.shell:
        cmd: iocage exec {{ outside_item.item }} 'echo "PermitRootLogin yes" >> /etc/ssh/sshd_config'

    - name: jail-init | {{ outside_item.item }} | start sshd
      ansible.builtin.shell:
        cmd: iocage exec {{ outside_item.item }} 'service sshd start'

    - name: jail-init | {{ outside_item.item }} | install packages
      ansible.builtin.shell:
        cmd: iocage exec {{ outside_item.item }} 'pkg install -y python39 bash sudo; ln -s /usr/local/bin/bash /bin/bash'
  become: true
@@ -1,42 +0,0 @@
---
- name: jails | check if jail exists
  ansible.builtin.shell:
    cmd: iocage list --header | awk '{print $2}' | grep --word-regexp {{ item }}
  loop: "{{ groups['truenas-jails'] }}"
  register: jails_check
  changed_when: false
  failed_when: jails_check.rc != 0 and jails_check.rc != 1

- name: jails | is iocage fetch required
  ansible.builtin.set_fact:
    jail_missing: true
  loop: "{{ jails_check.results }}"
  when: item.rc == 1

- block:
    - name: jails | get current FreeBSD release
      ansible.builtin.shell:
        cmd: freebsd-version | cut -d '-' -f 1-2
      register: release
      failed_when: release.rc != 0

    - name: jails | fetch iocage template {{ release.stdout }}
      ansible.builtin.shell:
        cmd: iocage fetch -r {{ release.stdout }}
      become: true

    - name: jails | create jail
      ansible.builtin.shell:
        cmd: iocage create -r {{ release.stdout }} -n {{ item.item }} dhcp=on boot=on
      loop: "{{ jails_check.results }}"
      when: item.rc == 1
      become: true

    - name: jails | init jails
      ansible.builtin.include_tasks: init.yml
      loop: "{{ jails_check.results }}"
      loop_control:
        loop_var: outside_item
      when: outside_item.rc == 1

  when: jail_missing
@@ -1,70 +0,0 @@
---
- name: jail-minio | get jail ip
  ansible.builtin.shell:
    cmd: iocage exec minio_v2 ifconfig epair0b | grep 'inet' | awk -F ' ' '{ print $2 }'
  changed_when: false
  register: minio_jail_ip
  become: true

- name: jail-minio_v2 | copy letsencrypt certificate
  ansible.builtin.copy:
    src: /mnt/{{ pool_name }}/home/homelab/letsencrypt/xpander.ovh/{{ item.src }}
    remote_src: true
    dest: /mnt/{{ iocage_pool_name }}/iocage/jails/minio_v2/root/home/minio/certs/{{ item.dest }}
    owner: 1002
    group: 1002
    mode: 0600
  loop:
    - { src: "fullchain.pem", dest: "public.crt" }
    - { src: "key.pem", dest: "private.key" }
  register: certificates
  become: true

- block:
    - name: jail-minio | install minio
      ansible.builtin.pkgng:
        name:
          - minio
          - curl
        state: present
      register: installation

    - name: jail-minio | create minio configuration in /etc/rc.conf
      ansible.builtin.blockinfile:
        path: /etc/rc.conf
        state: present
        block: |
          # MINIO
          minio_enable="YES"
          minio_address=":9000"
          minio_console_address=":9001"
          minio_disks="/mnt/data"
          minio_certs="/home/minio/certs"
          minio_env="MINIO_ACCESS_KEY={{ minio_access_key }} MINIO_SECRET_KEY={{ minio_secret_key }}"
      no_log: false
      register: configuration

    - name: jail-minio | restart minio service
      ansible.builtin.service:
        name: minio
        state: restarted
        enabled: true
      when: configuration.changed == true or installation.changed == true or certificates.changed == true

    - name: jail-minio | wait for 5 seconds
      ansible.builtin.pause:
        seconds: 5

    - name: jail-minio | check minio service
      ansible.builtin.command: curl -s localhost:9000/minio/health/live
      register: curl_result
      ignore_errors: true
      changed_when: false

    - name: jail-minio | fail if curl command failed
      ansible.builtin.fail:
        msg: 'Curl command failed'
      when: curl_result.rc != 0

  delegate_to: "{{ minio_jail_ip.stdout }}"
  remote_user: root
@@ -1,32 +0,0 @@
---
- block:
    - name: jail-minio_v2 | create zfs pools
      community.general.zfs:
        name: "{{ minio_pool_name }}/minio_v2"
        state: present
        extra_zfs_properties:
          atime: off
          setuid: off

    - name: jail-minio_v2 | create empty data dir
      ansible.builtin.shell:
        cmd: iocage exec minio_v2 mkdir -p /mnt/data

    - name: jail-minio_v2 | mount data
      ansible.builtin.shell:
        cmd: iocage fstab -a minio_v2 /mnt/{{ minio_pool_name }}/minio_v2 /mnt/data nullfs rw 0 0

    - name: jail-minio_v2 | create minio user
      ansible.builtin.shell:
        cmd: iocage exec minio_v2 'pw useradd minio -u 1002 -g 1002 -d /home/minio -m'

    - name: jail-minio_v2 | change owner on data dir
      ansible.builtin.shell:
        cmd: iocage exec minio_v2 'chown 1002:1002 /mnt/data'

    - name: jail-minio_v2 | create certificates folder
      ansible.builtin.file:
        path: /mnt/{{ iocage_pool_name }}/iocage/jails/minio_v2/root/home/minio/certs
        owner: 1002
        group: 1002
  become: true
@@ -1,77 +0,0 @@
---
- name: jail-{{ outside_item.item }} | get jail ip
  ansible.builtin.shell:
    cmd: iocage exec {{ outside_item.item }} ifconfig epair0b | grep 'inet' | awk -F ' ' '{ print $2 }'
  changed_when: false
  register: postgresql_jail_ip
  become: true

- name: jail-{{ outside_item.item }} | copy letsencrypt certificate
  ansible.builtin.copy:
    src: /mnt/{{ pool_name }}/home/homelab/letsencrypt/xpander.ovh/{{ item.src }}
    remote_src: true
    dest: /mnt/{{ postgresql_pool_name }}/postgresql/data{{ hostvars[outside_item.item]['postgresql_version'] }}/{{ item.dest }}
    owner: 770
    group: 770
    mode: 0600
  loop:
    - { src: "fullchain.pem", dest: "server.crt" }
    - { src: "key.pem", dest: "server.key" }
  register: certificates
  become: true
  tags:
    - certificates

- block:
    - name: jail-{{ outside_item.item }} | configure pg_hba
      ansible.builtin.template:
        src: postgresql/pg_hba.conf
        dest: /var/db/postgres/data{{ hostvars[outside_item.item]['postgresql_version'] }}/pg_hba.conf
        owner: postgres
        group: postgres
      register: pg_hba

    - name: jail-{{ outside_item.item }} | postgresql configuration
      community.postgresql.postgresql_set:
        name: "{{ item.name }}"
        value: "{{ item.value }}"
      loop:
        # listen to all addresses
        - { name: 'listen_addresses', value: '*' }
        # disable full page writes because of ZFS
        - { name: 'full_page_writes', value: 'off' }
        # SSL configuration
        - { name: 'ssl', value: 'on' }
        - { name: 'ssl_cert_file', value: 'server.crt' }
        - { name: 'ssl_key_file', value: 'server.key' }
        - { name: 'ssl_prefer_server_ciphers', value: 'on' }
        # https://pgtune.leopard.in.ua - Web application / 12GB
        - { name: 'max_connections', value: '200' }
        - { name: 'shared_buffers', value: '3GB' }
        - { name: 'effective_cache_size', value: '9GB' }
        - { name: 'maintenance_work_mem', value: '768MB' }
        - { name: 'checkpoint_completion_target', value: '0.9' }
        - { name: 'wal_buffers', value: '16MB' }
        - { name: 'random_page_cost', value: '1.1' }
        - { name: 'effective_io_concurrency', value: '200' }
        - { name: 'work_mem', value: '7864kB' }
        - { name: 'huge_pages', value: 'off' }
        - { name: 'min_wal_size', value: '1GB' }
        - { name: 'max_wal_size', value: '4GB' }
      loop_control:
        loop_var: item
      become: true
      vars:
        ansible_become_user: postgres
      register: pg_conf

    - name: restart postgresql
      ansible.builtin.service:
        name: postgresql
        state: restarted
      when: certificates.changed or pg_hba.changed or pg_conf.changed
      tags:
        - certificates

  delegate_to: "{{ postgresql_jail_ip.stdout }}"
  remote_user: root
@@ -1,134 +0,0 @@
---
- name: jail-{{ outside_item.item }} | get jail ip
  ansible.builtin.shell:
    cmd: iocage exec {{ outside_item.item }} ifconfig epair0b | grep 'inet' | awk -F ' ' '{ print $2 }'
  changed_when: false
  register: postgresql_jail_ip
  become: true

- block:
    - name: jail-{{ outside_item.item }} | create zfs pools
      community.general.zfs:
        name: "{{ item }}"
        state: present
      loop:
        - "{{ postgresql_pool_name }}/postgresql"
        - "{{ postgresql_pool_name }}/postgresql/data{{ hostvars[outside_item.item]['postgresql_version'] }}"

    - name: jail-{{ outside_item.item }} | configure zfs pool postgresql
      community.general.zfs:
        name: "{{ postgresql_pool_name }}/postgresql"
        state: present
        extra_zfs_properties:
          atime: off
          setuid: off

    - name: jail-{{ outside_item.item }} | create empty data{{ hostvars[outside_item.item]['postgresql_version'] }} dir
      ansible.builtin.shell:
        cmd: iocage exec {{ outside_item.item }} mkdir -p /var/db/postgres/data{{ hostvars[outside_item.item]['postgresql_version'] }}

    - name: jail-{{ outside_item.item }} | mount data{{ hostvars[outside_item.item]['postgresql_version'] }}
      ansible.builtin.shell:
        cmd: iocage fstab -a {{ outside_item.item }} /mnt/{{ postgresql_pool_name }}/postgresql/data{{ hostvars[outside_item.item]['postgresql_version'] }} /var/db/postgres/data{{ hostvars[outside_item.item]['postgresql_version'] }} nullfs rw 0 0
  become: true

- block:
    - name: jail-{{ outside_item.item }} | packages
      community.general.pkgng:
        name:
          - postgresql{{ hostvars[outside_item.item]['postgresql_version'] }}-server
          - postgresql{{ hostvars[outside_item.item]['postgresql_version'] }}-contrib
          - postgresql{{ hostvars[outside_item.item]['postgresql_version'] }}-client
          - py39-pip
        state: present

    - name: jail-{{ outside_item.item }} | pip packages
      ansible.builtin.pip:
        name: psycopg2
        state: present

    - name: jail-{{ outside_item.item }} | change postgres/data{{ hostvars[outside_item.item]['postgresql_version'] }} mod
      ansible.builtin.file:
        path: /var/db/postgres/data{{ hostvars[outside_item.item]['postgresql_version'] }}
        owner: postgres
        group: postgres

    - name: jail-{{ outside_item.item }} | initdb
      ansible.builtin.shell:
        cmd: su -m postgres -c 'initdb -E UTF-8 /var/db/postgres/data{{ hostvars[outside_item.item]['postgresql_version'] }}'

    - name: jail-{{ outside_item.item }} | move base and pg_wal
      ansible.builtin.shell:
        cmd: su -m postgres -c 'mv /var/db/postgres/data{{ hostvars[outside_item.item]['postgresql_version'] }}/{{ item }} /var/db/postgres/data{{ hostvars[outside_item.item]['postgresql_version'] }}/{{ item }}0'
      loop:
        - base
        - pg_wal

    - name: jail-{{ outside_item.item }} | create base and pg_wal empty dirs
      ansible.builtin.file:
        path: /var/db/postgres/data{{ hostvars[outside_item.item]['postgresql_version'] }}/{{ item }}
        state: directory
        owner: postgres
        group: postgres
      loop:
        - base
        - pg_wal

  delegate_to: "{{ postgresql_jail_ip.stdout }}"
  remote_user: root

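# Note: base and pg_wal are remounted from dedicated ZFS datasets below,
# presumably so each can carry its own ZFS properties (e.g. recordsize)
# independently of the parent data dataset; the play itself does not state
# the motivation.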
- block:
    - name: jail-{{ outside_item.item }} | create missing zfs pools
      community.general.zfs:
        name: "{{ item }}"
        state: present
      loop:
        - "{{ postgresql_pool_name }}/postgresql/data{{ hostvars[outside_item.item]['postgresql_version'] }}/base"
        - "{{ postgresql_pool_name }}/postgresql/data{{ hostvars[outside_item.item]['postgresql_version'] }}/pg_wal"

    - name: jail-{{ outside_item.item }} | mount base
      ansible.builtin.shell:
        cmd: iocage fstab -a {{ outside_item.item }} /mnt/{{ postgresql_pool_name }}/postgresql/data{{ hostvars[outside_item.item]['postgresql_version'] }}/{{ item }} /var/db/postgres/data{{ hostvars[outside_item.item]['postgresql_version'] }}/{{ item }} nullfs rw 0 0
      loop:
        - base
        - pg_wal

  become: true

- block:
    - name: jail-{{ outside_item.item }} | move base and pg_wal content to mounts
      ansible.builtin.shell:
        cmd: mv /var/db/postgres/data{{ hostvars[outside_item.item]['postgresql_version'] }}/{{ item }}0/* /var/db/postgres/data{{ hostvars[outside_item.item]['postgresql_version'] }}/{{ item }}/; rmdir /var/db/postgres/data{{ hostvars[outside_item.item]['postgresql_version'] }}/{{ item }}0
      loop:
        - base
        - pg_wal

    - name: jail-{{ outside_item.item }} | change mod
      ansible.builtin.file:
        path: /var/db/postgres/data{{ hostvars[outside_item.item]['postgresql_version'] }}/{{ item }}
        state: directory
        owner: postgres
        group: postgres
        recurse: true
      loop:
        - base
        - pg_wal

    - name: jail-{{ outside_item.item }} | enable postgresql service
      community.general.sysrc:
        name: postgresql_enable
        state: present
        value: "YES"

    - name: jail-{{ outside_item.item }} | start postgresql service
      ansible.builtin.service:
        name: postgresql
        state: started

    - name: jail-{{ outside_item.item }} | change postgresql password
      postgresql_query:
        login_user: postgres
        query: ALTER USER postgres PASSWORD '{{ postgresql_password }}'

  delegate_to: "{{ postgresql_jail_ip.stdout }}"
  remote_user: root
@@ -1,51 +0,0 @@
---
- ansible.builtin.include_tasks: directories.yml

- ansible.builtin.include_tasks: scripts.yml

- ansible.builtin.include_tasks: scrutiny.yml

- ansible.builtin.include_tasks: telegraf.yml

- ansible.builtin.include_tasks: wireguard.yml
  when: "main_nas == false"

- ansible.builtin.include_tasks: jails/main.yml
  when: "main_nas"

- block:
    - ansible.builtin.shell:
        cmd: test -f /mnt/{{ minio_pool_name }}/minio_v2/.minio.sys/config/config.json/xl.meta
      register: minio_data_exists
      become: true
      changed_when: false
      failed_when: minio_data_exists.rc != 0 and minio_data_exists.rc != 1

    - ansible.builtin.include_tasks: jails/minio-init.yml
      when: minio_data_exists.rc == 1

    - ansible.builtin.include_tasks: jails/minio-conf.yml
      tags:
        - certificates

    - ansible.builtin.shell:
        cmd: test -f /mnt/{{ postgresql_pool_name }}/postgresql/data{{ hostvars[item]['postgresql_version'] }}/postgresql.conf
      loop: "{{ groups['truenas-jails'] | select('search', 'postgresql') | list }}"
      register: postgresql_data_exists
      become: true
      changed_when: false
      failed_when: postgresql_data_exists.rc != 0 and postgresql_data_exists.rc != 1

    - ansible.builtin.include_tasks: jails/postgresql-init.yml
      loop: "{{ postgresql_data_exists.results }}"
      loop_control:
        loop_var: outside_item
      when: outside_item.rc == 1

    - ansible.builtin.include_tasks: jails/postgresql-conf.yml
      loop: "{{ postgresql_data_exists.results }}"
      loop_control:
        loop_var: outside_item
      tags:
        - certificates
  when: "main_nas"
@@ -1,19 +0,0 @@
---
- name: scripts | copy scripts
  ansible.builtin.copy:
    src: "scripts/{{ item }}"
    dest: "{{ scripts_dir }}/{{ item }}"
    mode: 0755
  loop:
    - snapshots_clearempty.py
    - snapshots_prune.py
    - telegraf_hddtemp.bash

- name: scripts | template scripts
  ansible.builtin.template:
    src: "scripts/{{ item.name }}"
    dest: "{{ scripts_dir }}/{{ item.name }}"
    mode: "{{ item.mode }}"
  loop:
    - { name: "jails_update.sh", mode: "0775" }
    - { name: "snapshots_prune.sh", mode: "0775" }
@@ -1,33 +0,0 @@
---
- name: scrutiny | Fetch the latest release information from GitHub
  ansible.builtin.uri:
    url: https://api.github.com/repos/AnalogJ/scrutiny/releases/latest
    method: GET
    return_content: yes
    headers:
      Accept: application/vnd.github.v3+json
  register: github_release_info
  changed_when: false

- name: scrutiny | Set the download URL for the FreeBSD binary
  ansible.builtin.set_fact:
    scrutiny_binary_url: "{{ item.browser_download_url }}"
  loop: "{{ github_release_info.json.assets }}"
  when: "'scrutiny-collector-metrics-freebsd-amd64' in item.name"
  no_log: true

- name: scrutiny | Download the latest scrutiny FreeBSD binary
  ansible.builtin.get_url:
    url: "{{ scrutiny_binary_url }}"
    dest: "{{ scrutiny_dir }}/{{ scrutiny_bin }}"
    mode: 0775
  when: scrutiny_binary_url is defined

- name: scrutiny | deploy scripts
  ansible.builtin.template:
    src: "scripts/{{ item.name }}"
    dest: "{{ scripts_dir }}/{{ item.name }}"
    mode: "{{ item.mode }}"
  loop:
    - { name: "scrutiny_collector.sh", mode: "0775" }
@@ -1,12 +0,0 @@
---
- name: telegraf | clone git repository
  ansible.builtin.git:
    repo: https://github.com/samuelkadolph/truenas-telegraf
    dest: "{{ telegraf_dir }}"
    version: main

- name: telegraf | copy configuration
  ansible.builtin.template:
    src: telegraf/telegraf.conf
    dest: "{{ telegraf_dir }}/telegraf.conf"
    mode: 0775
@@ -1,17 +0,0 @@
---
- name: wireguard | configuration
  ansible.builtin.template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    mode: "{{ item.mode }}"
  loop:
    - {
        src: "wireguard/{{ ansible_facts['nodename'] }}.conf",
        dest: "{{ homelab_homedir }}/{{ wg_interface }}.conf",
        mode: "0400",
      }
    - {
        src: "wireguard/ip-check.bash",
        dest: "{{ homelab_homedir }}/wireguard-ip-check.bash",
        mode: "0700",
      }
@@ -1,97 +0,0 @@
# PostgreSQL Client Authentication Configuration File
# ===================================================
#
# Refer to the "Client Authentication" section in the PostgreSQL
# documentation for a complete description of this file.  A short
# synopsis follows.
#
# This file controls: which hosts are allowed to connect, how clients
# are authenticated, which PostgreSQL user names they can use, which
# databases they can access.  Records take one of these forms:
#
# local         DATABASE  USER  METHOD  [OPTIONS]
# host          DATABASE  USER  ADDRESS  METHOD  [OPTIONS]
# hostssl       DATABASE  USER  ADDRESS  METHOD  [OPTIONS]
# hostnossl     DATABASE  USER  ADDRESS  METHOD  [OPTIONS]
# hostgssenc    DATABASE  USER  ADDRESS  METHOD  [OPTIONS]
# hostnogssenc  DATABASE  USER  ADDRESS  METHOD  [OPTIONS]
#
# (The uppercase items must be replaced by actual values.)
#
# The first field is the connection type:
# - "local" is a Unix-domain socket
# - "host" is a TCP/IP socket (encrypted or not)
# - "hostssl" is a TCP/IP socket that is SSL-encrypted
# - "hostnossl" is a TCP/IP socket that is not SSL-encrypted
# - "hostgssenc" is a TCP/IP socket that is GSSAPI-encrypted
# - "hostnogssenc" is a TCP/IP socket that is not GSSAPI-encrypted
#
# DATABASE can be "all", "sameuser", "samerole", "replication", a
# database name, or a comma-separated list thereof. The "all"
# keyword does not match "replication". Access to replication
# must be enabled in a separate record (see example below).
#
# USER can be "all", a user name, a group name prefixed with "+", or a
# comma-separated list thereof.  In both the DATABASE and USER fields
# you can also write a file name prefixed with "@" to include names
# from a separate file.
#
# ADDRESS specifies the set of hosts the record matches.  It can be a
# host name, or it is made up of an IP address and a CIDR mask that is
# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that
# specifies the number of significant bits in the mask.  A host name
# that starts with a dot (.) matches a suffix of the actual host name.
# Alternatively, you can write an IP address and netmask in separate
# columns to specify the set of hosts.  Instead of a CIDR-address, you
# can write "samehost" to match any of the server's own IP addresses,
# or "samenet" to match any address in any subnet that the server is
# directly connected to.
#
# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256",
# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert".
# Note that "password" sends passwords in clear text; "md5" or
# "scram-sha-256" are preferred since they send encrypted passwords.
#
# OPTIONS are a set of options for the authentication in the format
# NAME=VALUE.  The available options depend on the different
# authentication methods -- refer to the "Client Authentication"
# section in the documentation for a list of which options are
# available for which authentication methods.
#
# Database and user names containing spaces, commas, quotes and other
# special characters must be quoted.  Quoting one of the keywords
# "all", "sameuser", "samerole" or "replication" makes the name lose
# its special character, and just match a database or username with
# that name.
#
# This file is read on server startup and when the server receives a
# SIGHUP signal.  If you edit the file on a running system, you have to
# SIGHUP the server for the changes to take effect, run "pg_ctl reload",
# or execute "SELECT pg_reload_conf()".
#
# Put your actual configuration here
# ----------------------------------
#
# If you want to allow non-local connections, you need to add more
# "host" records.  In that case you will also need to make PostgreSQL
# listen on a non-local interface via the listen_addresses
# configuration parameter, or via the -i or -h command line switches.

# CAUTION: Configuring the system for local "trust" authentication
# allows any local user to connect as any PostgreSQL user, including
# the database superuser.  If you do not trust all your local users,
# use another authentication method.


# TYPE  DATABASE        USER            ADDRESS                 METHOD

# "local" is for Unix domain socket connections only
local   all             all                                     trust
# IPv4 local connections:
hostssl all             all             0.0.0.0/0               scram-sha-256
# IPv6 local connections:
# Allow replication connections from localhost, by a user with the
# replication privilege.
local   replication     all                                     trust
host    replication     all             127.0.0.1/32            trust
host    replication     all             ::1/128                 trust
@@ -1,14 +0,0 @@
#!/bin/sh

# DEBUG
# set -x

# Variables
VERSION=$(freebsd-version | sed 's|STABLE|RELEASE|g')
JAILS=$(iocage list --header | awk '{ print $2 }')

for jail in $JAILS; do
  iocage update $jail
  iocage exec $jail 'pkg update'
  iocage exec $jail 'pkg upgrade --yes'
done
@@ -1,10 +0,0 @@
#!/bin/sh

# DEBUG
# set -x

# Variables
BIN_PATH="{{ scrutiny_dir }}/{{ scrutiny_bin }}"
HOSTNAME=$(hostname)

$BIN_PATH run --host-id=${HOSTNAME} --api-endpoint=https://scrutiny.{{ SECRET_EXTERNAL_DOMAIN }}
@@ -1,17 +0,0 @@
#!/bin/sh

# DEBUG
# set -x

# Variables
SCRIPT_PATH="{{ scripts_dir }}"
INTERVAL="{{ snapshots_interval }}"
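# Example value for snapshots_interval (hypothetical), using the syntax
# accepted by snapshots_prune.py --intervals: "hourly:24,daily:7,weekly:4"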
POOL_NAME="{{ pool_name }}"

# Prune

${SCRIPT_PATH}/snapshots_prune.py --recursive --intervals ${INTERVAL} ${POOL_NAME}
${SCRIPT_PATH}/snapshots_prune.py --recursive --intervals daily:14 ${POOL_NAME}{% if not main_nas %}/replication/storage{% endif %}/minio_v2
{% if main_nas %}${SCRIPT_PATH}/snapshots_prune.py --recursive --intervals daily:7 ${POOL_NAME}/video{% endif %}

${SCRIPT_PATH}/snapshots_clearempty.py --recursive ${POOL_NAME}
@@ -1,49 +0,0 @@
[agent]
  interval = "20s"
  round_interval = true
  metric_batch_size = 1000
  metric_buffer_limit = 10000
  collection_jitter = "0s"
  flush_interval = "30s"
  flush_jitter = "0s"
  precision = ""
  debug = false
  quiet = false
  hostname = "{{ ansible_facts['nodename'] }}"
  omit_hostname = false

[[outputs.prometheus_client]]
  listen = ":9273"
  metric_version = 2
  path = "/metrics"
  string_as_label = true
  expiration_interval = "60m"

[[inputs.cpu]]
  percpu = true
  totalcpu = true

[[inputs.diskio]]

[[inputs.exec]]
  commands = ["{{ telegraf_dir }}/cputemp"]
  data_format = "influx"

[[inputs.exec]]
  commands = ["{{ scripts_dir }}/telegraf_hddtemp.bash"]
  name_override = "disktemp"
  timeout = "5s"
  data_format = "json"
  tag_keys = ["disk"]

[[inputs.mem]]

[[inputs.net]]
  interfaces = ["em0", "igb0"]

[[inputs.system]]

[[inputs.netstat]]

[[inputs.zfs]]
  poolMetrics = true
@@ -1,24 +0,0 @@
#!/bin/bash
# Check status of interface
# {{ wg_interface }}: name of the interface to check
# {{ dns_hostname }}: the name of the peer whose IP should be checked
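# 'wg show {{ wg_interface }} endpoints' prints one "<peer-public-key> <ip>:<port>"
# line per peer; the grep below extracts just the IPv4 address so it can be
# compared against the DNS record for {{ dns_hostname }}.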

cip=$(wg show {{ wg_interface }} endpoints | grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}")
echo "Wireguard peer IP from Interface: $cip"
pingip=$(ping -c 1 {{ ping_ip }} &> /dev/null && echo success || echo fail) #change ip to target server
digIP=$(dig +short {{ dns_hostname }}) #the peer address must be set
echo "$digIP"
if [ "$digIP" != "$cip" ]
then
    echo "IPs don't match, restarting wireguard..."
    wg-quick down {{ homelab_homedir }}/{{ wg_interface }}.conf
    wg-quick up {{ homelab_homedir }}/{{ wg_interface }}.conf
elif [ "$pingip" != "success" ]
then
    echo "Ping failed, restarting wireguard..."
    wg-quick down {{ homelab_homedir }}/{{ wg_interface }}.conf
    wg-quick up {{ homelab_homedir }}/{{ wg_interface }}.conf
else
    echo "OK"
    # nothing else to do
fi
@@ -1,11 +0,0 @@
[Interface]
Address = 10.10.0.2/32
ListenPort = 51820
PrivateKey = {{ wireguard_private_key }}
DNS = 10.10.0.1

[Peer]
PublicKey = K7kgSuPwH2NA7FeLHwvGMX02kvhD8DxHgL/wflsgx34=
AllowedIPs = 0.0.0.0/0
Endpoint = services.{{ secret_domain }}:51820
PersistentKeepalive = 25
@@ -1 +0,0 @@
jail_missing: false